aboutsummaryrefslogtreecommitdiffstats
path: root/vendor/github.com/dexon-foundation/mcl/src/asm/x86.s
diff options
context:
space:
mode:
Diffstat (limited to 'vendor/github.com/dexon-foundation/mcl/src/asm/x86.s')
-rw-r--r-- vendor/github.com/dexon-foundation/mcl/src/asm/x86.s 73785
1 file changed, 73785 insertions, 0 deletions
diff --git a/vendor/github.com/dexon-foundation/mcl/src/asm/x86.s b/vendor/github.com/dexon-foundation/mcl/src/asm/x86.s
new file mode 100644
index 000000000..cdd988ad3
--- /dev/null
+++ b/vendor/github.com/dexon-foundation/mcl/src/asm/x86.s
@@ -0,0 +1,73785 @@
+ .text
+ .file "<stdin>"
+ .globl makeNIST_P192L
+ .align 16, 0x90
+ .type makeNIST_P192L,@function
+makeNIST_P192L: # @makeNIST_P192L
+# BB#0:
+ movl 4(%esp), %eax
+ movl $-1, 20(%eax)
+ movl $-1, 16(%eax)
+ movl $-1, 12(%eax)
+ movl $-2, 8(%eax)
+ movl $-1, 4(%eax)
+ movl $-1, (%eax)
+ retl $4
+.Lfunc_end0:
+ .size makeNIST_P192L, .Lfunc_end0-makeNIST_P192L
+
+ .globl mcl_fpDbl_mod_NIST_P192L
+ .align 16, 0x90
+ .type mcl_fpDbl_mod_NIST_P192L,@function
+mcl_fpDbl_mod_NIST_P192L: # @mcl_fpDbl_mod_NIST_P192L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 56(%esp), %eax
+ movl 32(%eax), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 28(%eax), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ xorl %edx, %edx
+ movl (%eax), %ebx
+ addl %ecx, %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 4(%eax), %ecx
+ adcl %edi, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 8(%eax), %ebp
+ adcl %esi, %ebp
+ movl 36(%eax), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 12(%eax), %esi
+ adcl %ecx, %esi
+ movl 40(%eax), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 16(%eax), %ecx
+ adcl %ebx, %ecx
+ movl 44(%eax), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl 20(%eax), %eax
+ adcl %edi, %eax
+ adcl $0, %edx
+ sbbl %edi, %edi
+ andl $1, %edi
+ addl %ebx, 24(%esp) # 4-byte Folded Spill
+ movl (%esp), %ebx # 4-byte Reload
+ adcl %ebx, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %edx
+ adcl $0, %edi
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %esi
+ adcl $0, %ecx
+ adcl $0, %eax
+ adcl $0, %edx
+ adcl $0, %edi
+ addl %edx, 24(%esp) # 4-byte Folded Spill
+ adcl %edi, 28(%esp) # 4-byte Folded Spill
+ adcl %ebp, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl %esi, %edi
+ adcl $0, %ecx
+ adcl $0, %eax
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 24(%esp), %esi # 4-byte Reload
+ addl $1, %esi
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $1, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %edi, %edx
+ adcl $0, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ adcl $0, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %edx
+ adcl $0, %edx
+ adcl $-1, %ebx
+ andl $1, %ebx
+ jne .LBB1_2
+# BB#1:
+ movl %edx, %eax
+.LBB1_2:
+ testb %bl, %bl
+ movl 24(%esp), %edx # 4-byte Reload
+ jne .LBB1_4
+# BB#3:
+ movl %esi, %edx
+.LBB1_4:
+ movl 52(%esp), %esi
+ movl %edx, (%esi)
+ movl 20(%esp), %edx # 4-byte Reload
+ movl 28(%esp), %ebx # 4-byte Reload
+ jne .LBB1_6
+# BB#5:
+ movl %ebp, %ebx
+.LBB1_6:
+ movl %ebx, 4(%esi)
+ jne .LBB1_8
+# BB#7:
+ movl 8(%esp), %edx # 4-byte Reload
+.LBB1_8:
+ movl %edx, 8(%esi)
+ jne .LBB1_10
+# BB#9:
+ movl 12(%esp), %edi # 4-byte Reload
+.LBB1_10:
+ movl %edi, 12(%esi)
+ jne .LBB1_12
+# BB#11:
+ movl 16(%esp), %ecx # 4-byte Reload
+.LBB1_12:
+ movl %ecx, 16(%esi)
+ movl %eax, 20(%esi)
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end1:
+ .size mcl_fpDbl_mod_NIST_P192L, .Lfunc_end1-mcl_fpDbl_mod_NIST_P192L
+
+ .globl mcl_fp_sqr_NIST_P192L
+ .align 16, 0x90
+ .type mcl_fp_sqr_NIST_P192L,@function
+mcl_fp_sqr_NIST_P192L: # @mcl_fp_sqr_NIST_P192L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $92, %esp
+ calll .L2$pb
+.L2$pb:
+ popl %ebx
+.Ltmp0:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp0-.L2$pb), %ebx
+ movl 116(%esp), %eax
+ movl %eax, 4(%esp)
+ leal 44(%esp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_sqrPre6L@PLT
+ xorl %edi, %edi
+ movl 76(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 72(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %esi
+ addl %eax, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax
+ adcl %edx, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp
+ adcl %ecx, %ebp
+ movl 80(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %esi
+ adcl %eax, %esi
+ movl 84(%esp), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx
+ adcl %ebx, %ecx
+ movl 88(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %edx
+ adcl %eax, %edx
+ adcl $0, %edi
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl %ebx, 36(%esp) # 4-byte Folded Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 40(%esp) # 4-byte Folded Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %edi
+ adcl $0, %eax
+ addl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %esi
+ adcl $0, %ecx
+ adcl $0, %edx
+ adcl $0, %edi
+ adcl $0, %eax
+ addl %edi, 36(%esp) # 4-byte Folded Spill
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl %ebp, %edi
+ adcl %esi, %eax
+ adcl $0, %ecx
+ adcl $0, %edx
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 36(%esp), %esi # 4-byte Reload
+ addl $1, %esi
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ adcl $1, %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl $0, %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ adcl $0, %ebp
+ adcl $-1, %ebx
+ andl $1, %ebx
+ jne .LBB2_2
+# BB#1:
+ movl %ebp, %edx
+.LBB2_2:
+ testb %bl, %bl
+ movl 36(%esp), %ebx # 4-byte Reload
+ jne .LBB2_4
+# BB#3:
+ movl %esi, %ebx
+.LBB2_4:
+ movl 112(%esp), %esi
+ movl %ebx, (%esi)
+ movl 40(%esp), %ebx # 4-byte Reload
+ jne .LBB2_6
+# BB#5:
+ movl 20(%esp), %ebx # 4-byte Reload
+.LBB2_6:
+ movl %ebx, 4(%esi)
+ jne .LBB2_8
+# BB#7:
+ movl 24(%esp), %edi # 4-byte Reload
+.LBB2_8:
+ movl %edi, 8(%esi)
+ jne .LBB2_10
+# BB#9:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB2_10:
+ movl %eax, 12(%esi)
+ jne .LBB2_12
+# BB#11:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB2_12:
+ movl %ecx, 16(%esi)
+ movl %edx, 20(%esi)
+ addl $92, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end2:
+ .size mcl_fp_sqr_NIST_P192L, .Lfunc_end2-mcl_fp_sqr_NIST_P192L
+
+ .globl mcl_fp_mulNIST_P192L
+ .align 16, 0x90
+ .type mcl_fp_mulNIST_P192L,@function
+mcl_fp_mulNIST_P192L: # @mcl_fp_mulNIST_P192L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $92, %esp
+ calll .L3$pb
+.L3$pb:
+ popl %ebx
+.Ltmp1:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp1-.L3$pb), %ebx
+ movl 120(%esp), %eax
+ movl %eax, 8(%esp)
+ movl 116(%esp), %eax
+ movl %eax, 4(%esp)
+ leal 44(%esp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre6L@PLT
+ xorl %edi, %edi
+ movl 76(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 72(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %esi
+ addl %eax, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax
+ adcl %edx, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp
+ adcl %ecx, %ebp
+ movl 80(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %esi
+ adcl %eax, %esi
+ movl 84(%esp), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx
+ adcl %ebx, %ecx
+ movl 88(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %edx
+ adcl %eax, %edx
+ adcl $0, %edi
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl %ebx, 36(%esp) # 4-byte Folded Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 40(%esp) # 4-byte Folded Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %edi
+ adcl $0, %eax
+ addl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %esi
+ adcl $0, %ecx
+ adcl $0, %edx
+ adcl $0, %edi
+ adcl $0, %eax
+ addl %edi, 36(%esp) # 4-byte Folded Spill
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl %ebp, %edi
+ adcl %esi, %eax
+ adcl $0, %ecx
+ adcl $0, %edx
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 36(%esp), %esi # 4-byte Reload
+ addl $1, %esi
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ adcl $1, %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl $0, %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ adcl $0, %ebp
+ adcl $-1, %ebx
+ andl $1, %ebx
+ jne .LBB3_2
+# BB#1:
+ movl %ebp, %edx
+.LBB3_2:
+ testb %bl, %bl
+ movl 36(%esp), %ebx # 4-byte Reload
+ jne .LBB3_4
+# BB#3:
+ movl %esi, %ebx
+.LBB3_4:
+ movl 112(%esp), %esi
+ movl %ebx, (%esi)
+ movl 40(%esp), %ebx # 4-byte Reload
+ jne .LBB3_6
+# BB#5:
+ movl 20(%esp), %ebx # 4-byte Reload
+.LBB3_6:
+ movl %ebx, 4(%esi)
+ jne .LBB3_8
+# BB#7:
+ movl 24(%esp), %edi # 4-byte Reload
+.LBB3_8:
+ movl %edi, 8(%esi)
+ jne .LBB3_10
+# BB#9:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB3_10:
+ movl %eax, 12(%esi)
+ jne .LBB3_12
+# BB#11:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB3_12:
+ movl %ecx, 16(%esi)
+ movl %edx, 20(%esi)
+ addl $92, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end3:
+ .size mcl_fp_mulNIST_P192L, .Lfunc_end3-mcl_fp_mulNIST_P192L
+
+ .globl mcl_fpDbl_mod_NIST_P521L
+ .align 16, 0x90
+ .type mcl_fpDbl_mod_NIST_P521L,@function
+mcl_fpDbl_mod_NIST_P521L: # @mcl_fpDbl_mod_NIST_P521L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ movl 84(%esp), %ecx
+ movl 124(%ecx), %edx
+ movl 128(%ecx), %esi
+ movl %esi, %eax
+ shldl $23, %edx, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 120(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 116(%ecx), %edx
+ shldl $23, %edx, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 112(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 108(%ecx), %edx
+ shldl $23, %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 104(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 100(%ecx), %edx
+ shldl $23, %edx, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 96(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 92(%ecx), %edx
+ shldl $23, %edx, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 84(%ecx), %edi
+ shldl $23, %edi, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 80(%ecx), %edx
+ shldl $23, %edx, %edi
+ movl 76(%ecx), %eax
+ shldl $23, %eax, %edx
+ movl 72(%ecx), %ebx
+ shldl $23, %ebx, %eax
+ movl 68(%ecx), %ebp
+ shldl $23, %ebp, %ebx
+ shrl $9, %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 64(%ecx), %esi
+ shldl $23, %esi, %ebp
+ andl $511, %esi # imm = 0x1FF
+ addl (%ecx), %ebp
+ adcl 4(%ecx), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ adcl 8(%ecx), %eax
+ adcl 12(%ecx), %edx
+ adcl 16(%ecx), %edi
+ movl 28(%esp), %ebx # 4-byte Reload
+ adcl 20(%ecx), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ebx # 4-byte Reload
+ adcl 24(%ecx), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ adcl 28(%ecx), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %ebx # 4-byte Reload
+ adcl 32(%ecx), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl 36(%ecx), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %ebx # 4-byte Reload
+ adcl 40(%ecx), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 24(%esp), %ebx # 4-byte Reload
+ adcl 44(%ecx), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl 48(%ecx), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 20(%esp), %ebx # 4-byte Reload
+ adcl 52(%ecx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 56(%ecx), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ adcl 60(%ecx), %ebx
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ shrl $9, %ecx
+ andl $1, %ecx
+ addl %ebp, %ecx
+ adcl $0, 16(%esp) # 4-byte Folded Spill
+ adcl $0, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ adcl $0, 48(%esp) # 4-byte Folded Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ adcl $0, 20(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl %ebx, %ebp
+ adcl $0, %ebp
+ movl 12(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ movl %ecx, %edi
+ andl %eax, %edi
+ andl %edx, %edi
+ andl %esi, %edi
+ andl 28(%esp), %edi # 4-byte Folded Reload
+ andl 32(%esp), %edi # 4-byte Folded Reload
+ andl 36(%esp), %edi # 4-byte Folded Reload
+ andl 40(%esp), %edi # 4-byte Folded Reload
+ andl 44(%esp), %edi # 4-byte Folded Reload
+ andl 48(%esp), %edi # 4-byte Folded Reload
+ andl 24(%esp), %edi # 4-byte Folded Reload
+ andl 52(%esp), %edi # 4-byte Folded Reload
+ movl 20(%esp), %esi # 4-byte Reload
+ andl %esi, %edi
+ andl 56(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, %edx
+ movl 16(%esp), %ebx # 4-byte Reload
+ andl %ebp, %edi
+ movl %ebp, %eax
+ movl %edx, %ebp
+ orl $-512, %ebp # imm = 0xFFFFFFFFFFFFFE00
+ andl %edi, %ebp
+ andl %ebx, %ebp
+ cmpl $-1, %ebp
+ movl 80(%esp), %edi
+ je .LBB4_1
+# BB#3: # %nonzero
+ movl %ecx, (%edi)
+ movl %ebx, 4(%edi)
+ movl (%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%edi)
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%edi)
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, 16(%edi)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 20(%edi)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%edi)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%edi)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%edi)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%edi)
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%edi)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%edi)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%edi)
+ movl %esi, 52(%edi)
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%edi)
+ movl %eax, 60(%edi)
+ andl $511, %edx # imm = 0x1FF
+ movl %edx, 64(%edi)
+ jmp .LBB4_2
+.LBB4_1: # %zero
+ xorl %eax, %eax
+ movl $17, %ecx
+ rep;stosl
+.LBB4_2: # %zero
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end4:
+ .size mcl_fpDbl_mod_NIST_P521L, .Lfunc_end4-mcl_fpDbl_mod_NIST_P521L
+
+ .globl mcl_fp_mulUnitPre1L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre1L,@function
+mcl_fp_mulUnitPre1L: # @mcl_fp_mulUnitPre1L
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %eax
+ mull 12(%esp)
+ movl 4(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edx, 4(%ecx)
+ retl
+.Lfunc_end5:
+ .size mcl_fp_mulUnitPre1L, .Lfunc_end5-mcl_fp_mulUnitPre1L
+
+ .globl mcl_fpDbl_mulPre1L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre1L,@function
+mcl_fpDbl_mulPre1L: # @mcl_fpDbl_mulPre1L
+# BB#0:
+ movl 12(%esp), %eax
+ movl (%eax), %eax
+ movl 8(%esp), %ecx
+ mull (%ecx)
+ movl 4(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edx, 4(%ecx)
+ retl
+.Lfunc_end6:
+ .size mcl_fpDbl_mulPre1L, .Lfunc_end6-mcl_fpDbl_mulPre1L
+
+ .globl mcl_fpDbl_sqrPre1L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre1L,@function
+mcl_fpDbl_sqrPre1L: # @mcl_fpDbl_sqrPre1L
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %eax
+ mull %eax
+ movl 4(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edx, 4(%ecx)
+ retl
+.Lfunc_end7:
+ .size mcl_fpDbl_sqrPre1L, .Lfunc_end7-mcl_fpDbl_sqrPre1L
+
+ .globl mcl_fp_mont1L
+ .align 16, 0x90
+ .type mcl_fp_mont1L,@function
+mcl_fp_mont1L: # @mcl_fp_mont1L
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %eax
+ movl 20(%esp), %ecx
+ mull (%ecx)
+ movl %eax, %ecx
+ movl %edx, %esi
+ movl 24(%esp), %edx
+ movl -4(%edx), %eax
+ imull %ecx, %eax
+ movl (%edx), %edi
+ mull %edi
+ addl %ecx, %eax
+ adcl %esi, %edx
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl %edx, %eax
+ subl %edi, %eax
+ sbbl $0, %ecx
+ testb $1, %cl
+ jne .LBB8_2
+# BB#1:
+ movl %eax, %edx
+.LBB8_2:
+ movl 12(%esp), %eax
+ movl %edx, (%eax)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end8:
+ .size mcl_fp_mont1L, .Lfunc_end8-mcl_fp_mont1L
+
+ .globl mcl_fp_montNF1L
+ .align 16, 0x90
+ .type mcl_fp_montNF1L,@function
+mcl_fp_montNF1L: # @mcl_fp_montNF1L
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %eax
+ movl 20(%esp), %ecx
+ mull (%ecx)
+ movl %eax, %ecx
+ movl %edx, %esi
+ movl 24(%esp), %edx
+ movl -4(%edx), %eax
+ imull %ecx, %eax
+ movl (%edx), %edi
+ mull %edi
+ addl %ecx, %eax
+ adcl %esi, %edx
+ movl %edx, %eax
+ subl %edi, %eax
+ js .LBB9_2
+# BB#1:
+ movl %eax, %edx
+.LBB9_2:
+ movl 12(%esp), %eax
+ movl %edx, (%eax)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end9:
+ .size mcl_fp_montNF1L, .Lfunc_end9-mcl_fp_montNF1L
+
+ .globl mcl_fp_montRed1L
+ .align 16, 0x90
+ .type mcl_fp_montRed1L,@function
+mcl_fp_montRed1L: # @mcl_fp_montRed1L
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %ecx
+ movl (%ecx), %esi
+ movl 20(%esp), %edx
+ movl -4(%edx), %eax
+ imull %esi, %eax
+ movl (%edx), %edi
+ mull %edi
+ addl %esi, %eax
+ adcl 4(%ecx), %edx
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ movl %edx, %eax
+ subl %edi, %eax
+ sbbl $0, %ecx
+ testb $1, %cl
+ jne .LBB10_2
+# BB#1:
+ movl %eax, %edx
+.LBB10_2:
+ movl 12(%esp), %eax
+ movl %edx, (%eax)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end10:
+ .size mcl_fp_montRed1L, .Lfunc_end10-mcl_fp_montRed1L
+
+ .globl mcl_fp_addPre1L
+ .align 16, 0x90
+ .type mcl_fp_addPre1L,@function
+mcl_fp_addPre1L: # @mcl_fp_addPre1L
+# BB#0:
+ movl 12(%esp), %eax
+ movl (%eax), %eax
+ movl 4(%esp), %ecx
+ movl 8(%esp), %edx
+ addl (%edx), %eax
+ movl %eax, (%ecx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ retl
+.Lfunc_end11:
+ .size mcl_fp_addPre1L, .Lfunc_end11-mcl_fp_addPre1L
+
+ .globl mcl_fp_subPre1L
+ .align 16, 0x90
+ .type mcl_fp_subPre1L,@function
+mcl_fp_subPre1L: # @mcl_fp_subPre1L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ xorl %eax, %eax
+ movl 8(%esp), %edx
+ movl 16(%esp), %esi
+ subl (%esi), %ecx
+ movl %ecx, (%edx)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end12:
+ .size mcl_fp_subPre1L, .Lfunc_end12-mcl_fp_subPre1L
+
+ .globl mcl_fp_shr1_1L
+ .align 16, 0x90
+ .type mcl_fp_shr1_1L,@function
+mcl_fp_shr1_1L: # @mcl_fp_shr1_1L
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %eax
+ shrl %eax
+ movl 4(%esp), %ecx
+ movl %eax, (%ecx)
+ retl
+.Lfunc_end13:
+ .size mcl_fp_shr1_1L, .Lfunc_end13-mcl_fp_shr1_1L
+
+ .globl mcl_fp_add1L
+ .align 16, 0x90
+ .type mcl_fp_add1L,@function
+mcl_fp_add1L: # @mcl_fp_add1L
+# BB#0:
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %eax
+ movl 8(%esp), %ecx
+ movl 12(%esp), %edx
+ addl (%edx), %eax
+ movl %eax, (%ecx)
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 20(%esp), %esi
+ subl (%esi), %eax
+ sbbl $0, %edx
+ testb $1, %dl
+ jne .LBB14_2
+# BB#1: # %nocarry
+ movl %eax, (%ecx)
+.LBB14_2: # %carry
+ popl %esi
+ retl
+.Lfunc_end14:
+ .size mcl_fp_add1L, .Lfunc_end14-mcl_fp_add1L
+
+ .globl mcl_fp_addNF1L
+ .align 16, 0x90
+ .type mcl_fp_addNF1L,@function
+mcl_fp_addNF1L: # @mcl_fp_addNF1L
+# BB#0:
+ movl 12(%esp), %eax
+ movl (%eax), %eax
+ movl 8(%esp), %ecx
+ addl (%ecx), %eax
+ movl 16(%esp), %edx
+ movl %eax, %ecx
+ subl (%edx), %ecx
+ js .LBB15_2
+# BB#1:
+ movl %ecx, %eax
+.LBB15_2:
+ movl 4(%esp), %ecx
+ movl %eax, (%ecx)
+ retl
+.Lfunc_end15:
+ .size mcl_fp_addNF1L, .Lfunc_end15-mcl_fp_addNF1L
+
+ .globl mcl_fp_sub1L
+ .align 16, 0x90
+ .type mcl_fp_sub1L,@function
+mcl_fp_sub1L: # @mcl_fp_sub1L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %eax
+ xorl %edx, %edx
+ movl 8(%esp), %ecx
+ movl 16(%esp), %esi
+ subl (%esi), %eax
+ movl %eax, (%ecx)
+ sbbl $0, %edx
+ testb $1, %dl
+ jne .LBB16_2
+# BB#1: # %nocarry
+ popl %esi
+ retl
+.LBB16_2: # %carry
+ movl 20(%esp), %edx
+ addl (%edx), %eax
+ movl %eax, (%ecx)
+ popl %esi
+ retl
+.Lfunc_end16:
+ .size mcl_fp_sub1L, .Lfunc_end16-mcl_fp_sub1L
+
+ .globl mcl_fp_subNF1L
+ .align 16, 0x90
+ .type mcl_fp_subNF1L,@function
+mcl_fp_subNF1L: # @mcl_fp_subNF1L
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %eax
+ movl 12(%esp), %ecx
+ subl (%ecx), %eax
+ movl %eax, %ecx
+ sarl $31, %ecx
+ movl 16(%esp), %edx
+ andl (%edx), %ecx
+ addl %eax, %ecx
+ movl 4(%esp), %eax
+ movl %ecx, (%eax)
+ retl
+.Lfunc_end17:
+ .size mcl_fp_subNF1L, .Lfunc_end17-mcl_fp_subNF1L
+
+ .globl mcl_fpDbl_add1L
+ .align 16, 0x90
+ .type mcl_fpDbl_add1L,@function
+mcl_fpDbl_add1L: # @mcl_fpDbl_add1L
+# BB#0:
+ pushl %ebx
+ pushl %esi
+ movl 20(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %eax
+ movl 16(%esp), %esi
+ addl (%esi), %edx
+ movl 12(%esp), %ecx
+ adcl 4(%esi), %eax
+ movl %edx, (%ecx)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 24(%esp), %esi
+ movl %eax, %edx
+ subl (%esi), %edx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB18_2
+# BB#1:
+ movl %edx, %eax
+.LBB18_2:
+ movl %eax, 4(%ecx)
+ popl %esi
+ popl %ebx
+ retl
+.Lfunc_end18:
+ .size mcl_fpDbl_add1L, .Lfunc_end18-mcl_fpDbl_add1L
+
+ .globl mcl_fpDbl_sub1L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub1L,@function
+mcl_fpDbl_sub1L: # @mcl_fpDbl_sub1L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %eax
+ xorl %ecx, %ecx
+ movl 16(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %eax
+ movl 8(%esp), %edx
+ movl %esi, (%edx)
+ sbbl $0, %ecx
+ andl $1, %ecx
+ je .LBB19_2
+# BB#1:
+ movl 20(%esp), %ecx
+ movl (%ecx), %ecx
+.LBB19_2:
+ addl %eax, %ecx
+ movl %ecx, 4(%edx)
+ popl %esi
+ retl
+.Lfunc_end19:
+ .size mcl_fpDbl_sub1L, .Lfunc_end19-mcl_fpDbl_sub1L
+
+ .globl mcl_fp_mulUnitPre2L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre2L,@function
+mcl_fp_mulUnitPre2L: # @mcl_fp_mulUnitPre2L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl 20(%esp), %ebx
+ movl %ecx, %eax
+ mull 4(%ebx)
+ movl %edx, %esi
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull (%ebx)
+ movl 16(%esp), %ecx
+ movl %eax, (%ecx)
+ addl %edi, %edx
+ movl %edx, 4(%ecx)
+ adcl $0, %esi
+ movl %esi, 8(%ecx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end20:
+ .size mcl_fp_mulUnitPre2L, .Lfunc_end20-mcl_fp_mulUnitPre2L
+
+ .globl mcl_fpDbl_mulPre2L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre2L,@function
+mcl_fpDbl_mulPre2L: # @mcl_fpDbl_mulPre2L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $8, %esp
+ movl 32(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edi
+ movl 36(%esp), %ebx
+ movl (%ebx), %esi
+ movl %ecx, %eax
+ mull %esi
+ movl %edx, %ebp
+ movl 28(%esp), %edx
+ movl %eax, (%edx)
+ movl 4(%ebx), %ebx
+ movl %edi, %eax
+ mull %ebx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %edx, %ecx
+ movl %eax, %ebx
+ movl %edi, %eax
+ mull %esi
+ addl %ebp, %eax
+ adcl $0, %edx
+ addl %ebx, %eax
+ movl 28(%esp), %esi
+ movl %eax, 4(%esi)
+ adcl (%esp), %edx # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl %ecx, %edx
+ movl %edx, 8(%esi)
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esi)
+ addl $8, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end21:
+ .size mcl_fpDbl_mulPre2L, .Lfunc_end21-mcl_fpDbl_mulPre2L
+
+ .globl mcl_fpDbl_sqrPre2L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre2L,@function
+mcl_fpDbl_sqrPre2L: # @mcl_fpDbl_sqrPre2L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %esi
+ movl %esi, %eax
+ mull %esi
+ movl %edx, %edi
+ movl %eax, %ebx
+ movl %esi, %eax
+ mull %ecx
+ movl %edx, %esi
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull %ecx
+ movl 20(%esp), %ecx
+ movl %eax, (%ecx)
+ addl %ebp, %edx
+ movl %esi, %eax
+ adcl $0, %eax
+ addl %ebp, %edx
+ movl %edx, 4(%ecx)
+ adcl %ebx, %eax
+ sbbl %edx, %edx
+ andl $1, %edx
+ addl %esi, %eax
+ movl %eax, 8(%ecx)
+ adcl %edi, %edx
+ movl %edx, 12(%ecx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end22:
+ .size mcl_fpDbl_sqrPre2L, .Lfunc_end22-mcl_fpDbl_sqrPre2L
+
+ .globl mcl_fp_mont2L
+ .align 16, 0x90
+ .type mcl_fp_mont2L,@function
+mcl_fp_mont2L: # @mcl_fp_mont2L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+ movl 56(%esp), %ecx
+ movl (%ecx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 4(%ecx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx
+ movl (%ecx), %esi
+ mull %esi
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 64(%esp), %edx
+ movl -4(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ imull %ecx, %ebp
+ movl (%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 4(%edx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %edx
+ movl %edx, %ebx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %ecx
+ movl %edx, %ebp
+ movl %eax, %edi
+ movl 16(%esp), %eax # 4-byte Reload
+ mull %esi
+ addl 4(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %edx
+ addl (%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %ebx
+ addl 8(%esp), %edi # 4-byte Folded Reload
+ adcl %eax, %ebp
+ adcl %edx, %ebx
+ movl 60(%esp), %eax
+ movl 4(%eax), %ecx
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 16(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 12(%esp) # 4-byte Folded Reload
+ movl %eax, %ecx
+ movl %edx, %esi
+ addl 16(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %edi
+ addl %ebp, %ecx
+ adcl %ebx, %esi
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ sbbl %ebx, %ebx
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ andl $1, %ebx
+ mull 20(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ mull 24(%esp) # 4-byte Folded Reload
+ addl 16(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %edx
+ addl %ecx, %ebp
+ adcl %esi, %eax
+ adcl %edi, %edx
+ adcl $0, %ebx
+ movl %eax, %esi
+ subl 20(%esp), %esi # 4-byte Folded Reload
+ movl %edx, %ecx
+ sbbl 24(%esp), %ecx # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB23_2
+# BB#1:
+ movl %esi, %eax
+.LBB23_2:
+ movl 52(%esp), %esi
+ movl %eax, (%esi)
+ testb %bl, %bl
+ jne .LBB23_4
+# BB#3:
+ movl %ecx, %edx
+.LBB23_4:
+ movl %edx, 4(%esi)
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end23:
+ .size mcl_fp_mont2L, .Lfunc_end23-mcl_fp_mont2L
+
+ .globl mcl_fp_montNF2L
+ .align 16, 0x90
+ .type mcl_fp_montNF2L,@function
+mcl_fp_montNF2L: # @mcl_fp_montNF2L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $36, %esp
+ movl 60(%esp), %ecx
+ movl (%ecx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 4(%ecx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx
+ movl (%ecx), %ebp
+ mull %ebp
+ movl %eax, %ebx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl -4(%eax), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl %ebx, %edi
+ imull %ecx, %edi
+ movl (%eax), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 4(%eax), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %ecx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, %eax
+ mull %ebp
+ movl %edx, %edi
+ movl %eax, %ebp
+ addl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %edi
+ addl %ebx, %esi
+ adcl (%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %edi
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl 64(%esp), %eax
+ movl 4(%eax), %ebx
+ movl %ebx, %eax
+ mull %ecx
+ movl %edx, %esi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 20(%esp) # 4-byte Folded Reload
+ movl %eax, %ebx
+ movl %edx, %ecx
+ addl 16(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %esi
+ addl %ebp, %ebx
+ adcl %edi, %ecx
+ adcl $0, %esi
+ movl 24(%esp), %eax # 4-byte Reload
+ imull %ebx, %eax
+ movl %eax, %edi
+ mull 32(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %edi, %eax
+ movl 28(%esp), %edi # 4-byte Reload
+ mull %edi
+ addl %ebx, %ebp
+ adcl %ecx, %eax
+ adcl $0, %esi
+ addl 24(%esp), %eax # 4-byte Folded Reload
+ adcl %edx, %esi
+ movl %eax, %edx
+ subl 32(%esp), %edx # 4-byte Folded Reload
+ movl %esi, %ecx
+ sbbl %edi, %ecx
+ testl %ecx, %ecx
+ js .LBB24_2
+# BB#1:
+ movl %edx, %eax
+.LBB24_2:
+ movl 56(%esp), %edx
+ movl %eax, (%edx)
+ js .LBB24_4
+# BB#3:
+ movl %ecx, %esi
+.LBB24_4:
+ movl %esi, 4(%edx)
+ addl $36, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end24:
+ .size mcl_fp_montNF2L, .Lfunc_end24-mcl_fp_montNF2L
+
+ .globl mcl_fp_montRed2L
+ .align 16, 0x90
+ .type mcl_fp_montRed2L,@function
+mcl_fp_montRed2L: # @mcl_fp_montRed2L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $16, %esp
+ movl 44(%esp), %eax
+ movl -4(%eax), %ecx
+ movl (%eax), %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 40(%esp), %edx
+ movl (%edx), %ebp
+ movl %ebp, %edi
+ imull %ecx, %edi
+ movl 4(%eax), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %edx
+ movl %edx, %esi
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %ebx
+ movl %edx, %edi
+ addl 4(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %esi
+ addl %ebp, %eax
+ movl 40(%esp), %edx
+ movl 12(%edx), %eax
+ adcl 4(%edx), %edi
+ adcl 8(%edx), %esi
+ adcl $0, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ imull %edi, %ecx
+ andl $1, %ebx
+ movl %ecx, %eax
+ mull 8(%esp) # 4-byte Folded Reload
+ movl %edx, (%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull 12(%esp) # 4-byte Folded Reload
+ addl (%esp), %eax # 4-byte Folded Reload
+ adcl $0, %edx
+ addl %edi, %ebp
+ adcl %esi, %eax
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl %eax, %esi
+ subl 8(%esp), %esi # 4-byte Folded Reload
+ movl %edx, %ecx
+ sbbl 12(%esp), %ecx # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB25_2
+# BB#1:
+ movl %esi, %eax
+.LBB25_2:
+ movl 36(%esp), %esi
+ movl %eax, (%esi)
+ testb %bl, %bl
+ jne .LBB25_4
+# BB#3:
+ movl %ecx, %edx
+.LBB25_4:
+ movl %edx, 4(%esi)
+ addl $16, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end25:
+ .size mcl_fp_montRed2L, .Lfunc_end25-mcl_fp_montRed2L
+
+ .globl mcl_fp_addPre2L
+ .align 16, 0x90
+ .type mcl_fp_addPre2L,@function
+mcl_fp_addPre2L: # @mcl_fp_addPre2L
+# BB#0:
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+ movl 12(%esp), %edx
+ addl (%edx), %ecx
+ movl 8(%esp), %esi
+ adcl 4(%edx), %eax
+ movl %ecx, (%esi)
+ movl %eax, 4(%esi)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end26:
+ .size mcl_fp_addPre2L, .Lfunc_end26-mcl_fp_addPre2L
+
+# mcl_fp_subPre2L(z, x, y): 2-limb subtract without modular correction.
+# z[0..1] = x[0..1] - y[0..1]; the borrow-out (0 or 1) is returned in %eax.
+# i386 cdecl; after "pushl %esi": 8(%esp)=z, 12(%esp)=x, 16(%esp)=y.
+ .globl mcl_fp_subPre2L
+ .align 16, 0x90
+ .type mcl_fp_subPre2L,@function
+mcl_fp_subPre2L: # @mcl_fp_subPre2L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+# xor clears %eax before the flags matter; the borrow is folded in via sbbl $0.
+ xorl %eax, %eax
+ movl 16(%esp), %esi
+ subl (%esi), %ecx
+ sbbl 4(%esi), %edx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl %edx, 4(%esi)
+# %eax = 0 - borrow, then mask to 0/1 for the return value.
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end27:
+ .size mcl_fp_subPre2L, .Lfunc_end27-mcl_fp_subPre2L
+
+# mcl_fp_shr1_2L(z, x): 64-bit logical right shift by one.
+# z[0..1] = x[0..1] >> 1 (a zero bit is shifted into the top).
+# i386 cdecl: 4(%esp)=z, 8(%esp)=x.
+ .globl mcl_fp_shr1_2L
+ .align 16, 0x90
+ .type mcl_fp_shr1_2L,@function
+mcl_fp_shr1_2L: # @mcl_fp_shr1_2L
+# BB#0:
+ movl 8(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+# Low limb receives the low bit of the high limb via shrd.
+ shrdl $1, %eax, %ecx
+ movl 4(%esp), %edx
+ movl %ecx, (%edx)
+ shrl %eax
+ movl %eax, 4(%edx)
+ retl
+.Lfunc_end28:
+ .size mcl_fp_shr1_2L, .Lfunc_end28-mcl_fp_shr1_2L
+
+# mcl_fp_add2L(z, x, y, p): 2-limb modular add.
+# t = x + y is stored to z unconditionally; then t - p is computed and, if
+# it did not borrow (i.e. t >= p including the carry word), the reduced
+# value overwrites z. Assumes inputs are already < p.
+# i386 cdecl; after pushes: 12(%esp)=z, 16(%esp)=x, 20(%esp)=y, 24(%esp)=p.
+ .globl mcl_fp_add2L
+ .align 16, 0x90
+ .type mcl_fp_add2L,@function
+mcl_fp_add2L: # @mcl_fp_add2L
+# BB#0:
+ pushl %ebx
+ pushl %esi
+ movl 20(%esp), %ecx
+ movl (%ecx), %eax
+ movl 4(%ecx), %ecx
+ movl 16(%esp), %esi
+ addl (%esi), %eax
+ movl 12(%esp), %edx
+ adcl 4(%esi), %ecx
+# Store the (possibly unreduced) sum; %ebx captures the carry-out.
+ movl %eax, (%edx)
+ movl %ecx, 4(%edx)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+# Trial subtraction of p; the carry word %ebx absorbs the final borrow.
+ movl 24(%esp), %esi
+ subl (%esi), %eax
+ sbbl 4(%esi), %ecx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB29_2
+# BB#1: # %nocarry
+ movl %eax, (%edx)
+ movl %ecx, 4(%edx)
+.LBB29_2: # %carry
+ popl %esi
+ popl %ebx
+ retl
+.Lfunc_end29:
+ .size mcl_fp_add2L, .Lfunc_end29-mcl_fp_add2L
+
+# mcl_fp_addNF2L(z, x, y, p): 2-limb modular add, "NF" (no-carry-word) variant.
+# t = x + y; s = t - p; z = (s < 0, judged by the sign of the top limb of s)
+# ? t : s. Each limb is selected with a branch on the same sign test, so the
+# two stores stay consistent. Assumes x + y fits in 2 limbs (top bit free).
+# i386 cdecl; after pushes: 12(%esp)=z, 16(%esp)=x, 20(%esp)=y, 24(%esp)=p.
+ .globl mcl_fp_addNF2L
+ .align 16, 0x90
+ .type mcl_fp_addNF2L,@function
+mcl_fp_addNF2L: # @mcl_fp_addNF2L
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 20(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+ movl 16(%esp), %edx
+ addl (%edx), %ecx
+ adcl 4(%edx), %eax
+# Trial subtraction: (%esi,%edx) = t - p.
+ movl 24(%esp), %edi
+ movl %ecx, %esi
+ subl (%edi), %esi
+ movl %eax, %edx
+ sbbl 4(%edi), %edx
+# Sign of the high limb of t - p decides which value is stored.
+ testl %edx, %edx
+ js .LBB30_2
+# BB#1:
+ movl %esi, %ecx
+.LBB30_2:
+ movl 12(%esp), %esi
+ movl %ecx, (%esi)
+ js .LBB30_4
+# BB#3:
+ movl %edx, %eax
+.LBB30_4:
+ movl %eax, 4(%esi)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end30:
+ .size mcl_fp_addNF2L, .Lfunc_end30-mcl_fp_addNF2L
+
+# mcl_fp_sub2L(z, x, y, p): 2-limb modular subtract.
+# t = x - y is stored to z; if the subtraction borrowed (x < y), p is added
+# back and z is overwritten with the corrected value.
+# i386 cdecl; after pushes: 16(%esp)=z, 20(%esp)=x, 24(%esp)=y, 28(%esp)=p.
+ .globl mcl_fp_sub2L
+ .align 16, 0x90
+ .type mcl_fp_sub2L,@function
+mcl_fp_sub2L: # @mcl_fp_sub2L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 20(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+ xorl %ebx, %ebx
+ movl 24(%esp), %edx
+ subl (%edx), %ecx
+ sbbl 4(%edx), %eax
+# Store the raw difference; %ebx = borrow (0/1) after sbbl $0.
+ movl 16(%esp), %edx
+ movl %ecx, (%edx)
+ movl %eax, 4(%edx)
+ sbbl $0, %ebx
+ testb $1, %bl
+ je .LBB31_2
+# BB#1: # %carry
+# Borrowed: add the modulus back and re-store both limbs.
+ movl 28(%esp), %esi
+ movl 4(%esi), %edi
+ addl (%esi), %ecx
+ movl %ecx, (%edx)
+ adcl %eax, %edi
+ movl %edi, 4(%edx)
+.LBB31_2: # %nocarry
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end31:
+ .size mcl_fp_sub2L, .Lfunc_end31-mcl_fp_sub2L
+
+# mcl_fp_subNF2L(z, x, y, p): branchless 2-limb modular subtract.
+# t = x - y; mask = t.high >> 31 (arithmetic, all-ones iff t negative);
+# z = t + (p & mask). No conditional jumps.
+# i386 cdecl; after pushes: 12(%esp)=z, 16(%esp)=x, 20(%esp)=y, 24(%esp)=p.
+ .globl mcl_fp_subNF2L
+ .align 16, 0x90
+ .type mcl_fp_subNF2L,@function
+mcl_fp_subNF2L: # @mcl_fp_subNF2L
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %eax
+ movl 20(%esp), %edx
+ subl (%edx), %ecx
+ sbbl 4(%edx), %eax
+# %edx = sign mask of the difference (0 or 0xffffffff).
+ movl %eax, %edx
+ sarl $31, %edx
+# Mask the modulus limbs and add them in.
+ movl 24(%esp), %esi
+ movl 4(%esi), %edi
+ andl %edx, %edi
+ andl (%esi), %edx
+ addl %ecx, %edx
+ movl 12(%esp), %ecx
+ movl %edx, (%ecx)
+ adcl %eax, %edi
+ movl %edi, 4(%ecx)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end32:
+ .size mcl_fp_subNF2L, .Lfunc_end32-mcl_fp_subNF2L
+
+# mcl_fpDbl_add2L(z, x, y, p): add two double-width (4-limb) values.
+# z = x + y; the low 2 limbs are stored as-is, and the high 2 limbs (the
+# part that must stay < p) are conditionally reduced by subtracting p.
+# i386 cdecl; after pushes: 20(%esp)=z, 24(%esp)=x, 28(%esp)=y, 32(%esp)=p.
+ .globl mcl_fpDbl_add2L
+ .align 16, 0x90
+ .type mcl_fpDbl_add2L,@function
+mcl_fpDbl_add2L: # @mcl_fpDbl_add2L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %edx
+ movl 12(%edx), %esi
+ movl 24(%esp), %edi
+ movl 12(%edi), %eax
+ movl 8(%edx), %ecx
+ movl (%edx), %ebx
+ movl 4(%edx), %ebp
+# 4-limb add; limbs 0..1 of the sum go straight to z.
+ addl (%edi), %ebx
+ adcl 4(%edi), %ebp
+ movl 20(%esp), %edx
+ adcl 8(%edi), %ecx
+ movl %ebx, (%edx)
+ movl %ebp, 4(%edx)
+ adcl %esi, %eax
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+# Trial subtraction of p from the high half (limbs 2..3).
+ movl 32(%esp), %ebp
+ movl %ecx, %esi
+ subl (%ebp), %esi
+ movl %eax, %edi
+ sbbl 4(%ebp), %edi
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB33_2
+# BB#1:
+ movl %edi, %eax
+.LBB33_2:
+ testb %bl, %bl
+ jne .LBB33_4
+# BB#3:
+ movl %esi, %ecx
+.LBB33_4:
+ movl %ecx, 8(%edx)
+ movl %eax, 12(%edx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end33:
+ .size mcl_fpDbl_add2L, .Lfunc_end33-mcl_fpDbl_add2L
+
+# mcl_fpDbl_sub2L(z, x, y, p): subtract two double-width (4-limb) values.
+# z = x - y; the low 2 limbs are stored as-is, and when the full
+# subtraction borrowed, p is added back into the high 2 limbs
+# (the p limbs are zeroed instead when there was no borrow).
+# i386 cdecl; after pushes: 20(%esp)=z, 24(%esp)=x, 28(%esp)=y, 32(%esp)=p.
+ .globl mcl_fpDbl_sub2L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub2L,@function
+mcl_fpDbl_sub2L: # @mcl_fpDbl_sub2L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %ebx, %ebx
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %eax
+ sbbl 8(%edx), %eax
+ movl 12(%edx), %ebp
+ movl 12(%ecx), %edx
+ movl 20(%esp), %ecx
+ movl %esi, (%ecx)
+ movl %edi, 4(%ecx)
+ sbbl %ebp, %edx
+# %edi/%esi become either p[1]/p[0] or 0, depending on the borrow in %ebx.
+ movl 32(%esp), %edi
+ movl (%edi), %esi
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB34_1
+# BB#2:
+ xorl %edi, %edi
+ jmp .LBB34_3
+.LBB34_1:
+ movl 4(%edi), %edi
+.LBB34_3:
+ testb %bl, %bl
+ jne .LBB34_5
+# BB#4:
+ xorl %esi, %esi
+.LBB34_5:
+# High half: (limb2, limb3) += (masked p[0], masked p[1]).
+ addl %eax, %esi
+ movl %esi, 8(%ecx)
+ adcl %edx, %edi
+ movl %edi, 12(%ecx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end34:
+ .size mcl_fpDbl_sub2L, .Lfunc_end34-mcl_fpDbl_sub2L
+
+# mcl_fp_mulUnitPre3L(z, x, y): 3-limb by single-word multiply.
+# z[0..3] = x[0..2] * y, where y is one 32-bit word. The three partial
+# products are computed high-to-low, then chained with the carry flag.
+# i386 cdecl; after pushes+pushl %eax: 24(%esp)=z, 28(%esp)=x, 32(%esp)=y.
+ .globl mcl_fp_mulUnitPre3L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre3L,@function
+mcl_fp_mulUnitPre3L: # @mcl_fp_mulUnitPre3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ pushl %eax
+ movl 32(%esp), %ecx
+ movl 28(%esp), %edi
+# y * x[2] -> (%esi:spill), y * x[1] -> (%ebx:%ebp), y * x[0] -> (%edx:%eax).
+ movl %ecx, %eax
+ mull 8(%edi)
+ movl %edx, %esi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 4(%edi)
+ movl %edx, %ebx
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull (%edi)
+ movl 24(%esp), %ecx
+ movl %eax, (%ecx)
+# Fold each high word into the next partial product with adcl.
+ addl %ebp, %edx
+ movl %edx, 4(%ecx)
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%ecx)
+ adcl $0, %esi
+ movl %esi, 12(%ecx)
+ addl $4, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end35:
+ .size mcl_fp_mulUnitPre3L, .Lfunc_end35-mcl_fp_mulUnitPre3L
+
+# mcl_fpDbl_mulPre3L(z, x, y): full 3x3-limb schoolbook multiply.
+# z[0..5] = x[0..2] * y[0..2], processed one column of y at a time;
+# partial products are accumulated through the carry flag, with 32-bit
+# mul results spilled to the local stack frame as needed.
+# i386 cdecl; after frame setup: 48(%esp)=z, 52(%esp)=x, 56(%esp)=y.
+ .globl mcl_fpDbl_mulPre3L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre3L,@function
+mcl_fpDbl_mulPre3L: # @mcl_fpDbl_mulPre3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+# Column 0: x[0..2] * y[0]; z[0] is final immediately.
+ movl 52(%esp), %ecx
+ movl (%ecx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %edx
+ movl (%edx), %edi
+ mull %edi
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 48(%esp), %edx
+ movl %eax, (%edx)
+ movl 4(%ecx), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ movl 8(%ecx), %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %edi
+ movl %edx, %ecx
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %edi
+ movl %edx, %edi
+ movl %eax, %ebx
+ addl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+# Column 1: accumulate x[0..2] * y[1]; z[1] is final after this column.
+ movl 56(%esp), %eax
+ movl 4(%eax), %ecx
+ movl %esi, %eax
+ mull %ecx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ebp, %eax
+ mull %ecx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl 24(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 4(%esp) # 4-byte Spill
+ addl %ebx, %eax
+ movl 48(%esp), %ecx
+ movl %eax, 4(%ecx)
+ adcl %edi, %ebp
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+# Column 2: accumulate x[0..2] * y[2]; z[2..5] are finalized below.
+ movl 56(%esp), %eax
+ movl 8(%eax), %edi
+ sbbl %ecx, %ecx
+ movl (%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 8(%esp), %eax # 4-byte Reload
+ mull %edi
+ andl $1, %ecx
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ addl %ebx, %ebp
+ movl 48(%esp), %edi
+ movl %ebp, 8(%edi)
+ adcl (%esp), %esi # 4-byte Folded Reload
+ adcl %eax, %ecx
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %esi, 12(%edi)
+ movl %ecx, 16(%edi)
+ adcl %edx, %eax
+ movl %eax, 20(%edi)
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end36:
+ .size mcl_fpDbl_mulPre3L, .Lfunc_end36-mcl_fpDbl_mulPre3L
+
+# mcl_fpDbl_sqrPre3L(z, x): full 3-limb squaring.
+# z[0..5] = x[0..2]^2. All six distinct limb products x[i]*x[j] are
+# computed once up front; the cross products are then added into two
+# column positions each (reusing the single mul result), which is the
+# squaring-specific saving over mulPre3L.
+# i386 cdecl; after frame setup: 52(%esp)=z, 56(%esp)=x.
+ .globl mcl_fpDbl_sqrPre3L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre3L,@function
+mcl_fpDbl_sqrPre3L: # @mcl_fpDbl_sqrPre3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+# Compute all products: x2*x1, x2*x0, x1*x1, x1*x0, x0*x0, x2*x2.
+ movl 56(%esp), %eax
+ movl 8(%eax), %ebp
+ movl (%eax), %ecx
+ movl 4(%eax), %esi
+ movl %ebp, %eax
+ mull %esi
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %ecx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ebx, (%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %esi
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ecx
+ movl %edx, %esi
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull %ecx
+ movl %edx, %ecx
+ movl 52(%esp), %edx
+ movl %eax, (%edx)
+ movl %ebp, %eax
+ mull %ebp
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edx, 24(%esp) # 4-byte Spill
+# Column accumulation; each cross product x[i]*x[j] (i!=j) enters twice.
+ addl %edi, %ecx
+ movl %esi, %ebp
+ adcl %ebx, %ebp
+ movl 4(%esp), %ebx # 4-byte Reload
+ movl %ebx, %eax
+ adcl $0, %eax
+ addl %edi, %ecx
+ movl 52(%esp), %edx
+ movl %ecx, 4(%edx)
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl %edx, %eax
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ addl %esi, %ebp
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl %edi, %ecx
+ addl (%esp), %ebp # 4-byte Folded Reload
+ movl 52(%esp), %esi
+ movl %ebp, 8(%esi)
+ adcl %edx, %eax
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ sbbl %esi, %esi
+ andl $1, %esi
+ addl %ebx, %eax
+ adcl %edi, %ecx
+ movl 52(%esp), %edx
+ movl %eax, 12(%edx)
+ movl %ecx, 16(%edx)
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%edx)
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end37:
+ .size mcl_fpDbl_sqrPre3L, .Lfunc_end37-mcl_fpDbl_sqrPre3L
+
+# mcl_fp_mont3L(z, x, y, p): 3-limb Montgomery multiplication (CIOS).
+# z = x * y * R^-1 mod p with R = 2^96. The modulus descriptor at p also
+# carries p' = -p^-1 mod 2^32 in the word at -4(p). Each of the three
+# iterations multiplies by one limb of y, derives m = T[0] * p' and adds
+# m*p so the low word cancels; a final conditional subtraction of p puts
+# the result in [0, p).
+# i386 cdecl; after frame setup: 76(%esp)=z, 80(%esp)=x, 84(%esp)=y,
+# 88(%esp)=p.
+ .globl mcl_fp_mont3L
+ .align 16, 0x90
+ .type mcl_fp_mont3L,@function
+mcl_fp_mont3L: # @mcl_fp_mont3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+# Iteration 0: T = x * y[0]; m = T[0] * p'; T = (T + m*p) >> 32.
+ movl 80(%esp), %ecx
+ movl (%ecx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 84(%esp), %edx
+ movl (%edx), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ mull %edx
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 88(%esp), %esi
+ movl -4(%esi), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ imull %edx, %ebp
+ movl (%esi), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 8(%esi), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 4(%esi), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 4(%ecx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 8(%ecx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %ebx
+ movl %edx, %ebx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %edi
+ movl %edx, %ebp
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl 12(%esp), %ecx # 4-byte Reload
+ mull %ecx
+ movl %edx, %esi
+ movl %eax, %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %eax, %ecx
+ addl 16(%esp), %ecx # 4-byte Folded Reload
+ adcl %edi, %edx
+ adcl $0, %esi
+ addl (%esp), %ebp # 4-byte Folded Reload
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 4(%esp), %edi # 4-byte Reload
+ addl 32(%esp), %edi # 4-byte Folded Reload
+ adcl %ecx, %ebp
+ adcl %edx, %ebx
+ adcl %esi, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+# Iteration 1: T += x * y[1]; m = T[0] * p'; T = (T + m*p) >> 32.
+ movl 84(%esp), %eax
+ movl 4(%eax), %ecx
+ movl %ecx, %eax
+ mull 20(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 36(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 24(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ addl 12(%esp), %edi # 4-byte Folded Reload
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %eax, %edx
+ addl %ebp, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl %ebx, %edi
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 32(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %edx, %esi
+ imull 52(%esp), %esi # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 48(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 40(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ addl 4(%esp), %ecx # 4-byte Folded Reload
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %ebp
+ addl 12(%esp), %eax # 4-byte Folded Reload
+ adcl %edi, %ecx
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+# Iteration 2: T += x * y[2]; m = T[0] * p'; T = (T + m*p) >> 32.
+ movl 84(%esp), %eax
+ movl 8(%eax), %esi
+ movl %esi, %eax
+ mull 20(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 36(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 24(%esp) # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edx, %esi
+ addl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 36(%esp), %edx # 4-byte Reload
+ addl %ecx, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl %ebx, %esi
+ adcl %ebp, %edi
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ movl 52(%esp), %ebp # 4-byte Reload
+ imull %edx, %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ andl $1, %ecx
+ movl %ebp, %eax
+ mull 40(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 48(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ mull 44(%esp) # 4-byte Folded Reload
+ addl 28(%esp), %eax # 4-byte Folded Reload
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %ebp
+ addl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl %esi, %eax
+ adcl %edi, %edx
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %ecx
+# Final conditional subtraction: result = T - p if it does not borrow.
+ movl %eax, %ebx
+ subl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %edx, %edi
+ sbbl 44(%esp), %edi # 4-byte Folded Reload
+ movl %ebp, %esi
+ sbbl 48(%esp), %esi # 4-byte Folded Reload
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB38_2
+# BB#1:
+ movl %ebx, %eax
+.LBB38_2:
+ movl 76(%esp), %ebx
+ movl %eax, (%ebx)
+ testb %cl, %cl
+ jne .LBB38_4
+# BB#3:
+ movl %edi, %edx
+.LBB38_4:
+ movl %edx, 4(%ebx)
+ jne .LBB38_6
+# BB#5:
+ movl %esi, %ebp
+.LBB38_6:
+ movl %ebp, 8(%ebx)
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end38:
+ .size mcl_fp_mont3L, .Lfunc_end38-mcl_fp_mont3L
+
+# mcl_fp_montNF3L(z, x, y, p): 3-limb Montgomery multiplication, "NF"
+# variant. Same CIOS structure as mcl_fp_mont3L (p' = -p^-1 mod 2^32 is
+# read from -4(p)), but no extra carry word is tracked; the final value is
+# brought into [0, p) by computing T - p and selecting per limb on the
+# sign of the top borrow instead of a carry-driven conditional subtract.
+# i386 cdecl; after frame setup: 80(%esp)=z, 84(%esp)=x, 88(%esp)=y,
+# 92(%esp)=p.
+ .globl mcl_fp_montNF3L
+ .align 16, 0x90
+ .type mcl_fp_montNF3L,@function
+mcl_fp_montNF3L: # @mcl_fp_montNF3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+# Iteration 0: T = x * y[0]; m = T[0] * p'; T = (T + m*p) >> 32.
+ movl 84(%esp), %ebp
+ movl (%ebp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx
+ movl (%ecx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 92(%esp), %esi
+ movl -4(%esi), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ imull %edx, %ecx
+ movl (%esi), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 8(%esi), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 4(%esi), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 4(%ebp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 8(%ebp), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %esi, %eax
+ movl 20(%esp), %ecx # 4-byte Reload
+ mull %ecx
+ movl %edx, %edi
+ movl %eax, %ebx
+ movl 40(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, %ecx
+ movl %eax, %esi
+ addl 36(%esp), %esi # 4-byte Folded Reload
+ adcl %ebx, %ecx
+ adcl $0, %edi
+ addl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl (%esp), %esi # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %edi
+ addl 4(%esp), %esi # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+# Iteration 1: T += x * y[1]; reduce with m = T[0] * p'.
+ movl 88(%esp), %eax
+ movl 4(%eax), %ebx
+ movl %ebx, %eax
+ mull 24(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 40(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 28(%esp) # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ addl 16(%esp), %ebx # 4-byte Folded Reload
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %ebp
+ movl 20(%esp), %edx # 4-byte Reload
+ addl %esi, %edx
+ adcl %ecx, %ebx
+ adcl %edi, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %edx, %ecx
+ movl %edx, %edi
+ imull 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 48(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ addl %edi, %eax
+ adcl %ebx, %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %ebp
+ addl %edx, %esi
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+# Iteration 2: T += x * y[2]; reduce with m = T[0] * p'.
+ movl 88(%esp), %eax
+ movl 8(%eax), %edi
+ movl %edi, %eax
+ mull 24(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 40(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 28(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ addl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %ebx
+ addl %esi, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ adcl %ebp, %ecx
+ adcl $0, %ebx
+ movl 56(%esp), %esi # 4-byte Reload
+ imull %eax, %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %esi, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ mull 48(%esp) # 4-byte Folded Reload
+ addl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl %edi, %eax
+ adcl %ecx, %esi
+ adcl $0, %ebx
+ addl 40(%esp), %eax # 4-byte Folded Reload
+ adcl %edx, %esi
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+# Final selection: per-limb choose T or T - p by the sign of the borrow.
+ movl %eax, %edi
+ subl 44(%esp), %edi # 4-byte Folded Reload
+ movl %esi, %edx
+ sbbl 48(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, %ecx
+ sbbl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ js .LBB39_2
+# BB#1:
+ movl %edi, %eax
+.LBB39_2:
+ movl 80(%esp), %edi
+ movl %eax, (%edi)
+ js .LBB39_4
+# BB#3:
+ movl %edx, %esi
+.LBB39_4:
+ movl %esi, 4(%edi)
+ js .LBB39_6
+# BB#5:
+ movl %ecx, %ebx
+.LBB39_6:
+ movl %ebx, 8(%edi)
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end39:
+ .size mcl_fp_montNF3L, .Lfunc_end39-mcl_fp_montNF3L
+
+# mcl_fp_montRed3L(z, T, p): Montgomery reduction of a 6-limb value.
+# z = T * R^-1 mod p, R = 2^96, with p' = -p^-1 mod 2^32 at -4(p).
+# Three rounds: each derives m = T[0] * p', adds m*p to cancel the low
+# word, and shifts right one limb while folding in the next input limb.
+# Ends with a conditional subtraction of p.
+# i386 cdecl; after frame setup: 60(%esp)=z, 64(%esp)=T, 68(%esp)=p.
+ .globl mcl_fp_montRed3L
+ .align 16, 0x90
+ .type mcl_fp_montRed3L,@function
+mcl_fp_montRed3L: # @mcl_fp_montRed3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+# Round 0: m = T[0] * p'; fold m*p into T[0..3].
+ movl 68(%esp), %eax
+ movl -4(%eax), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl (%eax), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %ebx
+ movl (%ebx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ imull %edx, %ecx
+ movl 8(%eax), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 4(%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %esi
+ movl %edx, %esi
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, %ecx
+ addl %ebp, %ecx
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 16(%esp), %eax # 4-byte Folded Reload
+ adcl 4(%ebx), %ecx
+ adcl 8(%ebx), %esi
+ adcl 12(%ebx), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 20(%ebx), %eax
+ movl 16(%ebx), %edx
+ adcl $0, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+# Round 1: m = T[0] * p'; fold m*p, shift one limb.
+ movl %ecx, %edi
+ imull 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ mull 32(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 28(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, (%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 24(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ addl (%esp), %edi # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %ecx, %eax
+ adcl %esi, %edi
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl $0, 12(%esp) # 4-byte Folded Spill
+ adcl $0, %ebx
+# Round 2: m = T[0] * p'; fold m*p, shift one limb.
+ movl 36(%esp), %ecx # 4-byte Reload
+ imull %edi, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 32(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 24(%esp) # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl 36(%esp), %eax # 4-byte Reload
+ mull 28(%esp) # 4-byte Folded Reload
+ addl 8(%esp), %eax # 4-byte Folded Reload
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %esi
+ addl %edi, %ecx
+ adcl %ebp, %eax
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %ebx
+# Final conditional subtraction: result - p if no borrow.
+ movl %eax, %ebp
+ subl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %edx, %edi
+ sbbl 28(%esp), %edi # 4-byte Folded Reload
+ movl %esi, %ecx
+ sbbl 32(%esp), %ecx # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB40_2
+# BB#1:
+ movl %ebp, %eax
+.LBB40_2:
+ movl 60(%esp), %ebp
+ movl %eax, (%ebp)
+ testb %bl, %bl
+ jne .LBB40_4
+# BB#3:
+ movl %edi, %edx
+.LBB40_4:
+ movl %edx, 4(%ebp)
+ jne .LBB40_6
+# BB#5:
+ movl %ecx, %esi
+.LBB40_6:
+ movl %esi, 8(%ebp)
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end40:
+ .size mcl_fp_montRed3L, .Lfunc_end40-mcl_fp_montRed3L
+
+# mcl_fp_addPre3L(z, x, y): 3-limb add without modular reduction.
+# z[0..2] = x[0..2] + y[0..2]; the carry-out (0 or 1) is returned in %eax.
+# i386 cdecl; after "pushl %esi": 8(%esp)=z, 12(%esp)=x, 16(%esp)=y.
+ .globl mcl_fp_addPre3L
+ .align 16, 0x90
+ .type mcl_fp_addPre3L,@function
+mcl_fp_addPre3L: # @mcl_fp_addPre3L
+# BB#0:
+ pushl %esi
+ movl 16(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 12(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 8(%eax), %eax
+ adcl 8(%esi), %eax
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl %edx, 4(%esi)
+ movl %eax, 8(%esi)
+# Materialize the final carry as 0/1 in %eax (return value).
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ retl
+.Lfunc_end41:
+ .size mcl_fp_addPre3L, .Lfunc_end41-mcl_fp_addPre3L
+
+# mcl_fp_subPre3L(z, x, y): 3-limb subtract without modular correction.
+# z[0..2] = x[0..2] - y[0..2]; the borrow-out (0 or 1) is returned in %eax.
+# i386 cdecl; after pushes: 12(%esp)=z, 16(%esp)=x, 20(%esp)=y.
+ .globl mcl_fp_subPre3L
+ .align 16, 0x90
+ .type mcl_fp_subPre3L,@function
+mcl_fp_subPre3L: # @mcl_fp_subPre3L
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+# %eax is zeroed before the flag-sensitive chain; borrow folded in at the end.
+ xorl %eax, %eax
+ movl 20(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %esi
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl 12(%esp), %edi
+ movl %edx, (%edi)
+ movl %esi, 4(%edi)
+ movl %ecx, 8(%edi)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end42:
+ .size mcl_fp_subPre3L, .Lfunc_end42-mcl_fp_subPre3L
+
+# mcl_fp_shr1_3L(z, x): 96-bit logical right shift by one.
+# z[0..2] = x[0..2] >> 1 (a zero bit enters the top limb).
+# i386 cdecl; after "pushl %esi": 8(%esp)=z, 12(%esp)=x.
+ .globl mcl_fp_shr1_3L
+ .align 16, 0x90
+ .type mcl_fp_shr1_3L,@function
+mcl_fp_shr1_3L: # @mcl_fp_shr1_3L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl 8(%eax), %ecx
+ movl (%eax), %edx
+ movl 4(%eax), %eax
+# Each shrd pulls the low bit of the next-higher limb into the top.
+ shrdl $1, %eax, %edx
+ movl 8(%esp), %esi
+ movl %edx, (%esi)
+ shrdl $1, %ecx, %eax
+ movl %eax, 4(%esi)
+ shrl %ecx
+ movl %ecx, 8(%esi)
+ popl %esi
+ retl
+.Lfunc_end43:
+ .size mcl_fp_shr1_3L, .Lfunc_end43-mcl_fp_shr1_3L
+
+# mcl_fp_add3L(z, x, y, p): 3-limb modular add.
+# t = x + y is stored to z; then t - p is tried and, if it did not borrow
+# (counting the carry word), the reduced value overwrites z.
+# i386 cdecl; after pushes: 16(%esp)=z, 20(%esp)=x, 24(%esp)=y, 28(%esp)=p.
+ .globl mcl_fp_add3L
+ .align 16, 0x90
+ .type mcl_fp_add3L,@function
+mcl_fp_add3L: # @mcl_fp_add3L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %ecx
+ movl 20(%esp), %esi
+ addl (%esi), %eax
+ adcl 4(%esi), %ecx
+ movl 8(%edx), %edx
+ adcl 8(%esi), %edx
+# Store the (possibly unreduced) sum; %ebx keeps the carry-out.
+ movl 16(%esp), %esi
+ movl %eax, (%esi)
+ movl %ecx, 4(%esi)
+ movl %edx, 8(%esi)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+# Trial subtraction of p; overall borrow lands in bit 0 of %ebx.
+ movl 28(%esp), %edi
+ subl (%edi), %eax
+ sbbl 4(%edi), %ecx
+ sbbl 8(%edi), %edx
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB44_2
+# BB#1: # %nocarry
+ movl %eax, (%esi)
+ movl %ecx, 4(%esi)
+ movl %edx, 8(%esi)
+.LBB44_2: # %carry
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end44:
+ .size mcl_fp_add3L, .Lfunc_end44-mcl_fp_add3L
+
+# mcl_fp_addNF3L(z, x, y, p): 3-limb modular add, "NF" variant.
+# t = x + y; s = t - p; the sign of the top limb of s (replicated via
+# sarl) selects t or s per limb. Assumes the sum fits in 3 limbs.
+# i386 cdecl; after pushes: 20(%esp)=z, 24(%esp)=x, 28(%esp)=y, 32(%esp)=p.
+ .globl mcl_fp_addNF3L
+ .align 16, 0x90
+ .type mcl_fp_addNF3L,@function
+mcl_fp_addNF3L: # @mcl_fp_addNF3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 24(%esp), %esi
+ addl (%esi), %edx
+ adcl 4(%esi), %ecx
+ movl 8(%eax), %eax
+ adcl 8(%esi), %eax
+# Trial subtraction: (%ebx,%edi,%esi) = t - p.
+ movl 32(%esp), %ebp
+ movl %edx, %ebx
+ subl (%ebp), %ebx
+ movl %ecx, %edi
+ sbbl 4(%ebp), %edi
+ movl %eax, %esi
+ sbbl 8(%ebp), %esi
+# %ebp = sign of the borrow; js keeps the unreduced limb.
+ movl %esi, %ebp
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ js .LBB45_2
+# BB#1:
+ movl %ebx, %edx
+.LBB45_2:
+ movl 20(%esp), %ebx
+ movl %edx, (%ebx)
+ js .LBB45_4
+# BB#3:
+ movl %edi, %ecx
+.LBB45_4:
+ movl %ecx, 4(%ebx)
+ js .LBB45_6
+# BB#5:
+ movl %esi, %eax
+.LBB45_6:
+ movl %eax, 8(%ebx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end45:
+ .size mcl_fp_addNF3L, .Lfunc_end45-mcl_fp_addNF3L
+
+# mcl_fp_sub3L(z, x, y, p): 3-limb modular subtract.
+# t = x - y is stored to z; if the subtraction borrowed, p is added back
+# and z is overwritten with the corrected limbs.
+# i386 cdecl; after pushes: 20(%esp)=z, 24(%esp)=x, 28(%esp)=y, 32(%esp)=p.
+ .globl mcl_fp_sub3L
+ .align 16, 0x90
+ .type mcl_fp_sub3L,@function
+mcl_fp_sub3L: # @mcl_fp_sub3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %edx
+ movl (%edx), %ecx
+ movl 4(%edx), %eax
+ xorl %ebx, %ebx
+ movl 28(%esp), %esi
+ subl (%esi), %ecx
+ sbbl 4(%esi), %eax
+ movl 8(%edx), %edx
+ sbbl 8(%esi), %edx
+# Store the raw difference; %ebx ends up 1 iff the subtraction borrowed.
+ movl 20(%esp), %esi
+ movl %ecx, (%esi)
+ movl %eax, 4(%esi)
+ movl %edx, 8(%esi)
+ sbbl $0, %ebx
+ testb $1, %bl
+ je .LBB46_2
+# BB#1: # %carry
+# Borrowed: add the modulus back and re-store all three limbs.
+ movl 32(%esp), %edi
+ movl 4(%edi), %ebx
+ movl 8(%edi), %ebp
+ addl (%edi), %ecx
+ movl %ecx, (%esi)
+ adcl %eax, %ebx
+ movl %ebx, 4(%esi)
+ adcl %edx, %ebp
+ movl %ebp, 8(%esi)
+.LBB46_2: # %nocarry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end46:
+ .size mcl_fp_sub3L, .Lfunc_end46-mcl_fp_sub3L
+
+# mcl_fp_subNF3L(z, x, y, p): branchless 3-limb modular subtract.
+# t = x - y; mask = sign of t (all-ones iff negative); z = t + (p & mask).
+# The shldl builds the mask for p[0] from the sign without disturbing the
+# masks already derived for the other limbs.
+# i386 cdecl; after pushes: 20(%esp)=z, 24(%esp)=x, 28(%esp)=y, 32(%esp)=p.
+ .globl mcl_fp_subNF3L
+ .align 16, 0x90
+ .type mcl_fp_subNF3L,@function
+mcl_fp_subNF3L: # @mcl_fp_subNF3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 28(%esp), %esi
+ subl (%esi), %ecx
+ sbbl 4(%esi), %edx
+ movl 8(%eax), %eax
+ sbbl 8(%esi), %eax
+# %esi = sign mask of the difference (0 or 0xffffffff).
+ movl %eax, %esi
+ sarl $31, %esi
+ movl %esi, %edi
+ shldl $1, %eax, %edi
+# Add the masked modulus limbs back in.
+ movl 32(%esp), %ebx
+ andl (%ebx), %edi
+ movl 8(%ebx), %ebp
+ andl %esi, %ebp
+ andl 4(%ebx), %esi
+ addl %ecx, %edi
+ adcl %edx, %esi
+ movl 20(%esp), %ecx
+ movl %edi, (%ecx)
+ movl %esi, 4(%ecx)
+ adcl %eax, %ebp
+ movl %ebp, 8(%ecx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end47:
+ .size mcl_fp_subNF3L, .Lfunc_end47-mcl_fp_subNF3L
+
+# mcl_fpDbl_add3L(z, x, y, p): add two double-width (6-limb) values.
+# z = x + y; the low 3 limbs are stored as-is and the high 3 limbs are
+# conditionally reduced by subtracting p (select per limb on the borrow).
+# i386 cdecl; after pushes+pushl %eax: 24(%esp)=z, 28(%esp)=x,
+# 32(%esp)=y, 36(%esp)=p.
+ .globl mcl_fpDbl_add3L
+ .align 16, 0x90
+ .type mcl_fpDbl_add3L,@function
+mcl_fpDbl_add3L: # @mcl_fpDbl_add3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ pushl %eax
+ movl 32(%esp), %esi
+ movl 20(%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 16(%esi), %edi
+ movl 12(%esi), %ebx
+ movl (%esi), %edx
+ movl 28(%esp), %eax
+ addl (%eax), %edx
+ movl 24(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%esi), %edx
+ movl 4(%esi), %esi
+ adcl 4(%eax), %esi
+ adcl 8(%eax), %edx
+ movl %esi, 4(%ecx)
+ movl 20(%eax), %ebp
+ movl %edx, 8(%ecx)
+ movl 12(%eax), %esi
+ movl 16(%eax), %edx
+ adcl %ebx, %esi
+ adcl %edi, %edx
+ adcl (%esp), %ebp # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+# Trial subtraction of p from the high half (limbs 3..5).
+ movl 36(%esp), %ecx
+ movl %esi, %ebx
+ subl (%ecx), %ebx
+ movl %edx, %edi
+ sbbl 4(%ecx), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl %ebp, %ecx
+ movl 36(%esp), %edi
+ sbbl 8(%edi), %ecx
+ sbbl $0, %eax
+ andl $1, %eax
+ jne .LBB48_2
+# BB#1:
+ movl %ecx, %ebp
+.LBB48_2:
+ testb %al, %al
+ jne .LBB48_4
+# BB#3:
+ movl %ebx, %esi
+.LBB48_4:
+ movl 24(%esp), %eax
+ movl %esi, 12(%eax)
+ jne .LBB48_6
+# BB#5:
+ movl (%esp), %edx # 4-byte Reload
+.LBB48_6:
+ movl %edx, 16(%eax)
+ movl %ebp, 20(%eax)
+ addl $4, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end48:
+ .size mcl_fpDbl_add3L, .Lfunc_end48-mcl_fpDbl_add3L
+
+# mcl_fpDbl_sub3L(z, x, y, p): subtract two double-width (6-limb) values.
+# z = x - y; the low 3 limbs are stored as-is and, if the full
+# subtraction borrowed, p is added back into the high 3 limbs (the p
+# limbs are replaced by zero when there was no borrow).
+# i386 cdecl; after pushes: 20(%esp)=z, 24(%esp)=x, 28(%esp)=y, 32(%esp)=p.
+ .globl mcl_fpDbl_sub3L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub3L,@function
+mcl_fpDbl_sub3L: # @mcl_fpDbl_sub3L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+ movl 28(%esp), %ebx
+ subl (%ebx), %edx
+ sbbl 4(%ebx), %esi
+ movl 8(%ecx), %ebp
+ sbbl 8(%ebx), %ebp
+ movl 20(%esp), %eax
+ movl %edx, (%eax)
+ movl 12(%ecx), %edi
+ sbbl 12(%ebx), %edi
+ movl %esi, 4(%eax)
+ movl 16(%ecx), %esi
+ sbbl 16(%ebx), %esi
+ movl 20(%ebx), %ebx
+ movl 20(%ecx), %edx
+ movl %ebp, 8(%eax)
+ sbbl %ebx, %edx
+# %ecx = overall borrow (0/1); it selects p or 0 for the add-back below.
+ movl $0, %ecx
+ sbbl $0, %ecx
+ andl $1, %ecx
+ movl 32(%esp), %ebp
+ jne .LBB49_1
+# BB#2:
+ xorl %ebx, %ebx
+ jmp .LBB49_3
+.LBB49_1:
+ movl 8(%ebp), %ebx
+.LBB49_3:
+ testb %cl, %cl
+ movl $0, %eax
+ jne .LBB49_4
+# BB#5:
+ xorl %ecx, %ecx
+ jmp .LBB49_6
+.LBB49_4:
+ movl (%ebp), %ecx
+ movl 4(%ebp), %eax
+.LBB49_6:
+# High half: (limb3, limb4, limb5) += (masked p[0..2]).
+ addl %edi, %ecx
+ adcl %esi, %eax
+ movl 20(%esp), %esi
+ movl %ecx, 12(%esi)
+ movl %eax, 16(%esi)
+ adcl %edx, %ebx
+ movl %ebx, 20(%esi)
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end49:
+ .size mcl_fpDbl_sub3L, .Lfunc_end49-mcl_fpDbl_sub3L
+
+# mcl_fp_mulUnitPre4L(z, x, y): 4-limb by single-word multiply.
+# z[0..4] = x[0..3] * y, where y is one 32-bit word. Partial products are
+# computed high-to-low and then chained through the carry flag.
+# i386 cdecl; after pushes+subl $12: 32(%esp)=z, 36(%esp)=x, 40(%esp)=y.
+ .globl mcl_fp_mulUnitPre4L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre4L,@function
+mcl_fp_mulUnitPre4L: # @mcl_fp_mulUnitPre4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $12, %esp
+ movl 40(%esp), %ecx
+ movl 36(%esp), %ebp
+# y*x[3] -> (%esi:spill), y*x[2] -> (%ebx:spill), y*x[1] -> (%edi:spill),
+# y*x[0] -> (%edx:%eax).
+ movl %ecx, %eax
+ mull 12(%ebp)
+ movl %edx, %esi
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 8(%ebp)
+ movl %edx, %ebx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 4(%ebp)
+ movl %edx, %edi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull (%ebp)
+ movl 32(%esp), %ecx
+ movl %eax, (%ecx)
+# Fold each high word into the next partial product with adcl.
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%ecx)
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%ecx)
+ adcl $0, %esi
+ movl %esi, 16(%ecx)
+ addl $12, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end50:
+ .size mcl_fp_mulUnitPre4L, .Lfunc_end50-mcl_fp_mulUnitPre4L
+
+ .globl mcl_fpDbl_mulPre4L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre4L,@function
+# void mcl_fpDbl_mulPre4L(uint32_t *z, const uint32_t *x, const uint32_t *y)
+# 32-bit x86 cdecl. After the prologue (4 pushes + 56-byte frame):
+#   76(%esp)=z (8-limb product), 80(%esp)=x, 84(%esp)=y (4 limbs each).
+# Schoolbook 4x4 multiply: one column pass per y-limb (y[0]..y[3]), each
+# pass does four `mull`s, sums partial products with add/adc, writes one
+# output limb, and carries the running row into the next pass.
+mcl_fpDbl_mulPre4L: # @mcl_fpDbl_mulPre4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 80(%esp), %edi
+ movl (%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx
+ movl (%ecx), %esi
+ movl %ecx, %ebp
+ mull %esi
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 4(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 8(%edi), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 12(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+# pass for y[1]: x[3..0] * y[1]
+ movl 4(%ebp), %ecx
+ movl %eax, %ebp
+ mull %ecx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ecx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, %eax
+ mull %ecx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+# pass for y[0] (remaining limbs): x[3..1] * y[0]
+ movl %ebp, %eax
+ mull %esi
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ebx, %eax
+ mull %esi
+ movl %edx, %ecx
+ movl %eax, %ebx
+ movl %edi, %eax
+ mull %esi
+ movl %edx, %edi
+ addl 12(%esp), %eax # 4-byte Folded Reload
+ adcl %ebx, %edi
+ adcl %ebp, %ecx
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ addl (%esp), %eax # 4-byte Folded Reload
+ movl 76(%esp), %edx
+ movl %eax, 4(%edx)
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 8(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%esp) # 4-byte Spill
+# pass for y[2]: x[3..0] * y[2]
+ movl 84(%esp), %eax
+ movl 8(%eax), %esi
+ movl 20(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 40(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 28(%esp) # 4-byte Spill
+ addl %edi, %eax
+ movl 76(%esp), %edx
+ movl %eax, 8(%edx)
+ adcl %ecx, %ebp
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+# pass for y[3]: x[3..0] * y[3], then write z[3..7]
+ movl 84(%esp), %eax
+ movl 12(%eax), %esi
+ sbbl %ecx, %ecx
+ movl %esi, %eax
+ movl 80(%esp), %edi
+ mull 12(%edi)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 8(%edi)
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 4(%edi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %esi, %eax
+ movl 80(%esp), %edx
+ mull (%edx)
+ movl %eax, %esi
+ andl $1, %ecx
+ addl 28(%esp), %ebp # 4-byte Folded Reload
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ addl %esi, %ebp
+ movl 76(%esp), %esi
+ movl %ebp, 12(%esi)
+ adcl %edi, %ebx
+ movl %eax, %edi
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl %edx, %ebx
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %esi, %edx
+ movl %ebx, 16(%edx)
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 20(%edx)
+ movl %ecx, 24(%edx)
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%edx)
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end51:
+ .size mcl_fpDbl_mulPre4L, .Lfunc_end51-mcl_fpDbl_mulPre4L
+
+ .globl mcl_fpDbl_sqrPre4L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre4L,@function
+# void mcl_fpDbl_sqrPre4L(uint32_t *z, const uint32_t *x)
+# 32-bit x86 cdecl. After the prologue (4 pushes + 40-byte frame):
+#   60(%esp)=z (8-limb result), 64(%esp)=x (4 limbs).
+# Computes z = x * x as a plain 4x4 schoolbook multiply (the compiler
+# reuses cross products where it can but does not use the halved
+# "square" shortcut); output limbs are written in order z[0]..z[7].
+mcl_fpDbl_sqrPre4L: # @mcl_fpDbl_sqrPre4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %ecx
+ movl 12(%ecx), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl (%ecx), %ebx
+ movl 4(%ecx), %esi
+ movl %ebp, %eax
+ mull %esi
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 8(%ecx), %edi
+ movl %edi, %eax
+ mull %esi
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %ebx
+ movl %edx, %ebp
+ movl %eax, %ecx
+ movl %edi, %eax
+ mull %ebx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %esi
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebx
+ movl %edx, (%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ebx, %eax
+ mull %ebx
+ movl 60(%esp), %ebx
+ movl %eax, (%ebx)
+# rows for x[0] and x[1]: note x[0]*x[1] is added twice (cross term)
+ addl %esi, %edx
+ movl (%esp), %eax # 4-byte Reload
+ movl %eax, %ebx
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %ebp
+ addl %esi, %edx
+ movl 60(%esp), %esi
+ movl %edx, 4(%esi)
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ sbbl %esi, %esi
+ andl $1, %esi
+ addl %eax, %ebx
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+# row for x[2]: x[3]*x[2] and x[2]^2
+ movl 16(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %edi
+ movl %eax, %edi
+ addl 20(%esp), %ebx # 4-byte Folded Reload
+ movl 60(%esp), %eax
+ movl %ebx, 8(%eax)
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl %ebp, %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl %esi, %eax
+ sbbl %esi, %esi
+ andl $1, %esi
+ addl 28(%esp), %ecx # 4-byte Folded Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl %edx, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+# row for x[3]: x[3]*x[0..3], then write z[3..7]
+ movl 64(%esp), %esi
+ movl 12(%esi), %ebp
+ movl %ebp, %eax
+ mull 8(%esi)
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 4(%esi)
+ movl %esi, %edi
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ebp, %eax
+ mull (%edi)
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull %ebp
+ addl %ecx, %edi
+ movl 60(%esp), %ebp
+ movl %edi, 12(%ebp)
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ addl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, %edi
+ movl %esi, 16(%edi)
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 20(%edi)
+ movl %eax, 24(%edi)
+ adcl %edx, %ecx
+ movl %ecx, 28(%edi)
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end52:
+ .size mcl_fpDbl_sqrPre4L, .Lfunc_end52-mcl_fpDbl_sqrPre4L
+
+ .globl mcl_fp_mont4L
+ .align 16, 0x90
+ .type mcl_fp_mont4L,@function
+# void mcl_fp_mont4L(uint32_t *z, const uint32_t *x, const uint32_t *y,
+#                    const uint32_t *p)
+# 32-bit x86 cdecl. After the prologue (4 pushes + 88-byte frame):
+#   108(%esp)=z, 112(%esp)=x, 116(%esp)=y, 120(%esp)=p (4-limb modulus;
+#   -4(p) holds the Montgomery constant n' = -p^-1 mod 2^32).
+# CIOS Montgomery multiplication, z = x*y*R^-1 mod p with R = 2^128:
+# four rounds, each multiplying by one y-limb, computing the quotient
+# q = t0*n' mod 2^32 via imull, adding q*p, and shifting right one limb.
+# Ends with a conditional subtraction of p selected by the borrow flag.
+mcl_fp_mont4L: # @mcl_fp_mont4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl 112(%esp), %ecx
+ movl (%ecx), %eax
+ movl %ecx, %ebp
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 116(%esp), %edx
+ movl (%edx), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ mull %edx
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 120(%esp), %edi
+ movl -4(%edi), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+# round 0 quotient: q = (x[0]*y[0]) * n' mod 2^32
+ movl %eax, %ebx
+ imull %edx, %ebx
+ movl (%edi), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 12(%edi), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 8(%edi), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 4(%edi), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ movl 4(%edi), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 12(%edi), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 8(%edi), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %esi
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ecx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ movl 28(%esp), %esi # 4-byte Reload
+ mull %esi
+ movl %edx, %ecx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %esi
+ movl %edx, %ebp
+ movl %eax, (%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, %ebx
+ movl %eax, %edi
+ addl 32(%esp), %edi # 4-byte Folded Reload
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 28(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ addl 8(%esp), %esi # 4-byte Folded Reload
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 12(%esp), %ebp # 4-byte Reload
+ addl 48(%esp), %ebp # 4-byte Folded Reload
+ adcl %edi, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl %ebx, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebx
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+# round 1: multiply by y[1], accumulate, then reduce with q = t0*n'
+ movl 116(%esp), %eax
+ movl 4(%eax), %esi
+ movl %esi, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 56(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ addl 24(%esp), %esi # 4-byte Folded Reload
+ movl %ecx, %edx
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %edi
+ addl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl %ebx, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl %ebp, %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %eax, %ecx
+ imull 80(%esp), %ecx # 4-byte Folded Reload
+ andl $1, %esi
+ movl %ecx, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ addl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 28(%esp), %eax # 4-byte Folded Reload
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+# round 2: multiply by y[2], accumulate, then reduce
+ movl 116(%esp), %eax
+ movl 8(%eax), %esi
+ movl %esi, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 56(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ addl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, %eax
+ adcl $0, %eax
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl %edi, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl %ebp, 32(%esp) # 4-byte Folded Spill
+ adcl %ebx, %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %ecx, %esi
+ imull 80(%esp), %esi # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, %ecx
+ movl %esi, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, %edi
+ movl %esi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ addl %edi, %esi
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 40(%esp), %eax # 4-byte Folded Reload
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+# round 3: multiply by y[3], accumulate, then reduce
+ movl 116(%esp), %eax
+ movl 12(%eax), %ebp
+ movl %ebp, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 56(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ addl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edi, %eax
+ adcl $0, %eax
+ movl 64(%esp), %edi # 4-byte Reload
+ addl %esi, %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl %ebx, %ebp
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ movl 80(%esp), %esi # 4-byte Reload
+ imull %edi, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ andl $1, %ebx
+ movl %esi, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %esi, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ mull 68(%esp) # 4-byte Folded Reload
+ addl 44(%esp), %eax # 4-byte Folded Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %edi
+ addl 64(%esp), %ecx # 4-byte Folded Reload
+ adcl %ebp, %eax
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %ebx
+# final conditional subtraction: if t >= p use t - p, else t
+ movl %eax, %ebp
+ subl 84(%esp), %ebp # 4-byte Folded Reload
+ movl %edx, %ecx
+ sbbl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ sbbl 72(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %edi, %ecx
+ sbbl 76(%esp), %ecx # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB53_2
+# BB#1:
+ movl %ebp, %eax
+.LBB53_2:
+ movl 108(%esp), %ebp
+ movl %eax, (%ebp)
+ testb %bl, %bl
+ jne .LBB53_4
+# BB#3:
+ movl 80(%esp), %edx # 4-byte Reload
+.LBB53_4:
+ movl %edx, 4(%ebp)
+ jne .LBB53_6
+# BB#5:
+ movl 84(%esp), %esi # 4-byte Reload
+.LBB53_6:
+ movl %esi, 8(%ebp)
+ jne .LBB53_8
+# BB#7:
+ movl %ecx, %edi
+.LBB53_8:
+ movl %edi, 12(%ebp)
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end53:
+ .size mcl_fp_mont4L, .Lfunc_end53-mcl_fp_mont4L
+
+ .globl mcl_fp_montNF4L
+ .align 16, 0x90
+ .type mcl_fp_montNF4L,@function
+# void mcl_fp_montNF4L(uint32_t *z, const uint32_t *x, const uint32_t *y,
+#                      const uint32_t *p)
+# 32-bit x86 cdecl. After the prologue (4 pushes + 84-byte frame):
+#   104(%esp)=z, 108(%esp)=x, 112(%esp)=y, 116(%esp)=p (-4(p) = n').
+# Montgomery multiplication variant of mcl_fp_mont4L: same 4-round CIOS
+# structure (q = t0*n' via imull each round), but the final selection
+# between t and t-p is driven by the sign of the top subtraction word
+# (`js`) rather than a separate carry word.
+# NOTE(review): "NF" presumably = no final full carry word — confirm
+# against mcl documentation.
+mcl_fp_montNF4L: # @mcl_fp_montNF4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $84, %esp
+ movl 108(%esp), %ecx
+ movl (%ecx), %eax
+ movl %ecx, %ebp
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx
+ movl (%ecx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %esi
+ movl -4(%esi), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+# round 0 quotient: q = (x[0]*y[0]) * n' mod 2^32
+ movl %eax, %ecx
+ imull %edx, %ecx
+ movl (%esi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 12(%esi), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 8(%esi), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 4(%esi), %ebx
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ movl 4(%eax), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 12(%eax), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 8(%eax), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ movl 36(%esp), %ebx # 4-byte Reload
+ mull %ebx
+ movl %edx, %ecx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebx
+ movl %ebx, %esi
+ movl %edx, %ebx
+ movl %eax, (%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, %esi
+ movl %eax, %ebp
+ addl 76(%esp), %ebp # 4-byte Folded Reload
+ adcl (%esp), %esi # 4-byte Folded Reload
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %ecx
+ addl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %ecx
+ addl 12(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+# round 1: multiply by y[1], accumulate, reduce
+ movl 112(%esp), %eax
+ movl 4(%eax), %edi
+ movl %edi, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 48(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edx, %edi
+ addl 20(%esp), %edi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %ebp, 32(%esp) # 4-byte Folded Spill
+ adcl %esi, %edi
+ adcl %ebx, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ movl %esi, %ecx
+ imull 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ addl %esi, %eax
+ adcl %edi, %ebx
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %edx, %ebx
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 28(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+# round 2: multiply by y[2], accumulate, reduce
+ movl 112(%esp), %eax
+ movl 8(%eax), %ecx
+ movl %ecx, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 48(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edx, %ecx
+ addl %edi, %ecx
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %esi
+ movl 32(%esp), %edx # 4-byte Reload
+ addl %ebx, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %edx, %ebx
+ imull 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ addl 32(%esp), %eax # 4-byte Folded Reload
+ adcl %ecx, %edi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl %ebp, %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %esi
+ addl %edx, %edi
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+# round 3: multiply by y[3], accumulate, reduce
+ movl 112(%esp), %eax
+ movl 12(%eax), %ecx
+ movl %ecx, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 48(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edx, %ecx
+ addl 32(%esp), %ecx # 4-byte Folded Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl 60(%esp), %edx # 4-byte Reload
+ addl %edi, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl %esi, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %ebx
+ movl 56(%esp), %edi # 4-byte Reload
+ imull %edx, %edi
+ movl %edi, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %edi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edi, %eax
+ movl %edi, %ebp
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl 44(%esp), %ebp # 4-byte Reload
+ addl 60(%esp), %ebp # 4-byte Folded Reload
+ adcl %ecx, %eax
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %ebx
+ addl 56(%esp), %eax # 4-byte Folded Reload
+ adcl %edx, %edi
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ adcl 76(%esp), %ebx # 4-byte Folded Reload
+# final selection: t - p computed; sign of top word picks t or t - p
+ movl %eax, %edx
+ subl 80(%esp), %edx # 4-byte Folded Reload
+ movl %edi, %ebp
+ sbbl 64(%esp), %ebp # 4-byte Folded Reload
+ movl %esi, %ecx
+ sbbl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl %ebx, %ecx
+ sbbl 72(%esp), %ecx # 4-byte Folded Reload
+ testl %ecx, %ecx
+ js .LBB54_2
+# BB#1:
+ movl %edx, %eax
+.LBB54_2:
+ movl 104(%esp), %edx
+ movl %eax, (%edx)
+ js .LBB54_4
+# BB#3:
+ movl %ebp, %edi
+.LBB54_4:
+ movl %edi, 4(%edx)
+ js .LBB54_6
+# BB#5:
+ movl 80(%esp), %esi # 4-byte Reload
+.LBB54_6:
+ movl %esi, 8(%edx)
+ js .LBB54_8
+# BB#7:
+ movl %ecx, %ebx
+.LBB54_8:
+ movl %ebx, 12(%edx)
+ addl $84, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end54:
+ .size mcl_fp_montNF4L, .Lfunc_end54-mcl_fp_montNF4L
+
+ .globl mcl_fp_montRed4L
+ .align 16, 0x90
+ .type mcl_fp_montRed4L,@function
+# void mcl_fp_montRed4L(uint32_t *z, const uint32_t *xy, const uint32_t *p)
+# 32-bit x86 cdecl. After the prologue (4 pushes + 64-byte frame):
+#   84(%esp)=z (4 limbs), 88(%esp)=xy (8-limb value to reduce),
+#   92(%esp)=p (4-limb modulus; -4(p) = n' = -p^-1 mod 2^32).
+# Montgomery reduction: four rounds of q = t0*n' (imull), add q*p, and
+# fold in the next input limb (4(%ecx)..28(%ecx)); finishes with the
+# usual conditional subtraction of p selected by the borrow.
+mcl_fp_montRed4L: # @mcl_fp_montRed4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $64, %esp
+ movl 92(%esp), %eax
+ movl -4(%eax), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl (%eax), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx
+ movl (%ecx), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+# round 0 quotient: q = xy[0] * n' mod 2^32
+ imull %edx, %esi
+ movl 12(%eax), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 8(%eax), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 4(%eax), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebp
+ movl %edx, %ebp
+ movl %eax, %ebx
+ movl %esi, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ addl %ebx, %edi
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 40(%esp), %eax # 4-byte Folded Reload
+ adcl 4(%ecx), %edi
+ adcl 8(%ecx), %ebp
+ adcl 12(%ecx), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 16(%ecx), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 28(%ecx), %eax
+ movl 24(%ecx), %edx
+ movl 20(%ecx), %ecx
+ adcl $0, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+# round 1
+ movl %edi, %ebx
+ imull 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 48(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ addl (%esp), %esi # 4-byte Folded Reload
+ movl %ecx, %ebx
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %edi, %eax
+ adcl %ebp, %esi
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 8(%esp), %edi # 4-byte Reload
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl $0, 20(%esp) # 4-byte Folded Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+# round 2
+ movl %esi, %ebp
+ imull 56(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 48(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ addl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl %ebx, %ecx
+ movl %ecx, %ebx
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %esi, %eax
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+# round 3
+ movl 56(%esp), %esi # 4-byte Reload
+ imull %ebp, %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 44(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ mull 48(%esp) # 4-byte Folded Reload
+ addl 16(%esp), %eax # 4-byte Folded Reload
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl $0, %ecx
+ addl %ebp, %esi
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %edi
+# final conditional subtraction of p
+ movl %eax, %ebp
+ subl 60(%esp), %ebp # 4-byte Folded Reload
+ movl %edx, %esi
+ sbbl 48(%esp), %esi # 4-byte Folded Reload
+ sbbl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl %ecx, %ebx
+ sbbl 44(%esp), %ebx # 4-byte Folded Reload
+ sbbl $0, %edi
+ andl $1, %edi
+ jne .LBB55_2
+# BB#1:
+ movl %ebp, %eax
+.LBB55_2:
+ movl 84(%esp), %ebp
+ movl %eax, (%ebp)
+ movl %edi, %eax
+ testb %al, %al
+ jne .LBB55_4
+# BB#3:
+ movl %esi, %edx
+.LBB55_4:
+ movl %edx, 4(%ebp)
+ movl 56(%esp), %eax # 4-byte Reload
+ jne .LBB55_6
+# BB#5:
+ movl 60(%esp), %eax # 4-byte Reload
+.LBB55_6:
+ movl %eax, 8(%ebp)
+ jne .LBB55_8
+# BB#7:
+ movl %ebx, %ecx
+.LBB55_8:
+ movl %ecx, 12(%ebp)
+ addl $64, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end55:
+ .size mcl_fp_montRed4L, .Lfunc_end55-mcl_fp_montRed4L
+
+ .globl mcl_fp_addPre4L
+ .align 16, 0x90
+ .type mcl_fp_addPre4L,@function
+# uint32_t mcl_fp_addPre4L(uint32_t *z, const uint32_t *x, const uint32_t *y)
+# 32-bit x86 cdecl. After 3 pushes: 16(%esp)=z, 20(%esp)=x, 24(%esp)=y.
+# z[0..3] = x[0..3] + y[0..3] (add/adc chain); returns the final carry
+# (0 or 1) in %eax via the sbbl/andl idiom.
+mcl_fp_addPre4L: # @mcl_fp_addPre4L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 20(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 12(%eax), %edi
+ movl 8(%eax), %eax
+ adcl 8(%esi), %eax
+ movl 12(%esi), %esi
+ movl 16(%esp), %ebx
+ movl %ecx, (%ebx)
+ movl %edx, 4(%ebx)
+ movl %eax, 8(%ebx)
+ adcl %edi, %esi
+ movl %esi, 12(%ebx)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end56:
+ .size mcl_fp_addPre4L, .Lfunc_end56-mcl_fp_addPre4L
+
+ .globl mcl_fp_subPre4L
+ .align 16, 0x90
+ .type mcl_fp_subPre4L,@function
+# uint32_t mcl_fp_subPre4L(uint32_t *z, const uint32_t *x, const uint32_t *y)
+# 32-bit x86 cdecl. After 4 pushes: 20(%esp)=z, 24(%esp)=x, 28(%esp)=y.
+# z[0..3] = x[0..3] - y[0..3] (sub/sbb chain); returns the final borrow
+# (0 or 1) in %eax (eax pre-zeroed, then sbbl $0 / andl $1).
+mcl_fp_subPre4L: # @mcl_fp_subPre4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+ xorl %eax, %eax
+ movl 28(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %esi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edi), %ebx
+ movl 12(%edi), %edi
+ movl 12(%ecx), %ecx
+ movl 20(%esp), %ebp
+ movl %edx, (%ebp)
+ movl %esi, 4(%ebp)
+ movl %ebx, 8(%ebp)
+ sbbl %edi, %ecx
+ movl %ecx, 12(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end57:
+ .size mcl_fp_subPre4L, .Lfunc_end57-mcl_fp_subPre4L
+
+# ---------------------------------------------------------------------------
+# mcl_fp_shr1_4L(z, x) — i386 cdecl.
+# Logical right shift of a 4-limb value by one bit: z = x >> 1.
+# Each lower limb is refilled from its neighbor's low bit with SHRD;
+# the top limb gets a plain SHR (zero shifted in).
+# After the 2 pushes, args sit at 12(%esp)=z, 16(%esp)=x.
+# ---------------------------------------------------------------------------
+ .globl mcl_fp_shr1_4L
+ .align 16, 0x90
+ .type mcl_fp_shr1_4L,@function
+mcl_fp_shr1_4L: # @mcl_fp_shr1_4L
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %eax
+ movl 12(%eax), %ecx
+ movl 8(%eax), %edx
+ movl (%eax), %esi
+ movl 4(%eax), %eax
+# shrdl shifts %esi right by 1, filling the top bit from %eax's bit 0
+ shrdl $1, %eax, %esi
+ movl 12(%esp), %edi
+ movl %esi, (%edi)
+ shrdl $1, %edx, %eax
+ movl %eax, 4(%edi)
+ shrdl $1, %ecx, %edx
+ movl %edx, 8(%edi)
+ shrl %ecx
+ movl %ecx, 12(%edi)
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end58:
+ .size mcl_fp_shr1_4L, .Lfunc_end58-mcl_fp_shr1_4L
+
+# ---------------------------------------------------------------------------
+# mcl_fp_add4L(z, x, y, p) — i386 cdecl.
+# 4-limb modular addition: z = x + y, then a trial subtraction of the
+# modulus p; if (carry_out - trial_borrow) is zero the reduced value
+# overwrites the stored sum, otherwise the unreduced sum stands.
+# The unreduced sum is stored first, so z is written in both paths.
+# After the 4 pushes, args sit at 20=z, 24=x, 28=y, 32=p (all %esp-rel).
+# ---------------------------------------------------------------------------
+ .globl mcl_fp_add4L
+ .align 16, 0x90
+ .type mcl_fp_add4L,@function
+mcl_fp_add4L: # @mcl_fp_add4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %ecx
+ movl 24(%esp), %esi
+ addl (%esi), %eax
+ adcl 4(%esi), %ecx
+ movl 8(%edi), %edx
+ adcl 8(%esi), %edx
+ movl 12(%esi), %esi
+ adcl 12(%edi), %esi
+# store the unreduced sum; may be overwritten below when reduction applies
+ movl 20(%esp), %edi
+ movl %eax, (%edi)
+ movl %ecx, 4(%edi)
+ movl %edx, 8(%edi)
+ movl %esi, 12(%edi)
+# %ebx = carry out of the addition (0 or 1)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+# trial subtraction of the modulus p
+ movl 32(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %ecx
+ sbbl 8(%ebp), %edx
+ sbbl 12(%ebp), %esi
+ sbbl $0, %ebx
+# bit 0 of %ebx set => sum < p (borrow not covered by carry): keep sum
+ testb $1, %bl
+ jne .LBB59_2
+# BB#1: # %nocarry
+ movl %eax, (%edi)
+ movl %ecx, 4(%edi)
+ movl %edx, 8(%edi)
+ movl %esi, 12(%edi)
+.LBB59_2: # %carry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end59:
+ .size mcl_fp_add4L, .Lfunc_end59-mcl_fp_add4L
+
+# ---------------------------------------------------------------------------
+# mcl_fp_addNF4L(z, x, y, p) — i386 cdecl ("NF" variant; selects the
+# reduced value by the sign of the trial subtraction rather than a carry
+# flag — presumably valid because inputs leave the top bit free; TODO
+# confirm against the mcl fp invariants).
+# Computes s = x + y, t = s - p; if t's top limb is non-negative the
+# reduced t is stored limb-by-limb via the js-guarded moves, else s.
+# After 4 pushes + subl $8: 28=z, 32=x, 36=y, 40=p (all %esp-relative).
+# Two scratch spill slots live at (%esp) and 4(%esp).
+# ---------------------------------------------------------------------------
+ .globl mcl_fp_addNF4L
+ .align 16, 0x90
+ .type mcl_fp_addNF4L,@function
+mcl_fp_addNF4L: # @mcl_fp_addNF4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $8, %esp
+ movl 36(%esp), %edx
+ movl (%edx), %esi
+ movl 4(%edx), %ecx
+ movl 32(%esp), %edi
+ addl (%edi), %esi
+ adcl 4(%edi), %ecx
+ movl 12(%edx), %ebp
+ movl 8(%edx), %edx
+ adcl 8(%edi), %edx
+ adcl 12(%edi), %ebp
+# trial subtraction s - p; low limbs spilled, top limb kept in %edi
+ movl 40(%esp), %eax
+ movl %esi, %ebx
+ subl (%eax), %ebx
+ movl %ecx, %edi
+ sbbl 4(%eax), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl 40(%esp), %eax
+ sbbl 8(%eax), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ movl 40(%esp), %eax
+ sbbl 12(%eax), %edi
+# sign of the top limb of (s - p) picks reduced vs. unreduced per limb
+ testl %edi, %edi
+ js .LBB60_2
+# BB#1:
+ movl %ebx, %esi
+.LBB60_2:
+ movl 28(%esp), %ebx
+ movl %esi, (%ebx)
+ js .LBB60_4
+# BB#3:
+ movl (%esp), %ecx # 4-byte Reload
+.LBB60_4:
+ movl %ecx, 4(%ebx)
+ js .LBB60_6
+# BB#5:
+ movl 4(%esp), %edx # 4-byte Reload
+.LBB60_6:
+ movl %edx, 8(%ebx)
+ js .LBB60_8
+# BB#7:
+ movl %edi, %ebp
+.LBB60_8:
+ movl %ebp, 12(%ebx)
+ addl $8, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end60:
+ .size mcl_fp_addNF4L, .Lfunc_end60-mcl_fp_addNF4L
+
+# ---------------------------------------------------------------------------
+# mcl_fp_sub4L(z, x, y, p) — i386 cdecl.
+# 4-limb modular subtraction: z = x - y; if the subtraction borrows
+# (x < y), the modulus p is added back and z is rewritten, i.e.
+# z = x - y + p. The raw difference is stored in both paths.
+# After the 4 pushes, args sit at 20=z, 24=x, 28=y, 32=p (all %esp-rel).
+# ---------------------------------------------------------------------------
+ .globl mcl_fp_sub4L
+ .align 16, 0x90
+ .type mcl_fp_sub4L,@function
+mcl_fp_sub4L: # @mcl_fp_sub4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+# %ebx cleared; final "sbbl $0" captures the chain's borrow bit
+ xorl %ebx, %ebx
+ movl 28(%esp), %edi
+ subl (%edi), %eax
+ sbbl 4(%edi), %ecx
+ movl 8(%esi), %edx
+ sbbl 8(%edi), %edx
+ movl 12(%esi), %esi
+ sbbl 12(%edi), %esi
+ movl 20(%esp), %edi
+ movl %eax, (%edi)
+ movl %ecx, 4(%edi)
+ movl %edx, 8(%edi)
+ movl %esi, 12(%edi)
+ sbbl $0, %ebx
+ testb $1, %bl
+ je .LBB61_2
+# BB#1: # %carry
+# borrow occurred: add the modulus back, z = x - y + p
+ movl 32(%esp), %ebx
+ addl (%ebx), %eax
+ movl 8(%ebx), %ebp
+ adcl 4(%ebx), %ecx
+ movl 12(%ebx), %ebx
+ movl %eax, (%edi)
+ movl %ecx, 4(%edi)
+ adcl %edx, %ebp
+ movl %ebp, 8(%edi)
+ adcl %esi, %ebx
+ movl %ebx, 12(%edi)
+.LBB61_2: # %nocarry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end61:
+ .size mcl_fp_sub4L, .Lfunc_end61-mcl_fp_sub4L
+
+# ---------------------------------------------------------------------------
+# mcl_fp_subNF4L(z, x, y, p) — i386 cdecl (branch-free "NF" variant).
+# Computes d = x - y, then builds mask = sign-extension of the top limb
+# (sarl $31 => all-ones when d is negative, zero otherwise) and adds
+# (p AND mask): z = d + (d < 0 ? p : 0). No branches on the borrow.
+# After 4 pushes + subl $8: 28=z, 32=x, 36=y, 40=p (all %esp-relative).
+# ---------------------------------------------------------------------------
+ .globl mcl_fp_subNF4L
+ .align 16, 0x90
+ .type mcl_fp_subNF4L,@function
+mcl_fp_subNF4L: # @mcl_fp_subNF4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $8, %esp
+ movl 32(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 36(%esp), %esi
+ subl (%esi), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ sbbl 4(%esi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 12(%eax), %edi
+ movl 8(%eax), %edx
+ sbbl 8(%esi), %edx
+ sbbl 12(%esi), %edi
+# %esi = 0x00000000 or 0xffffffff depending on the sign of the top limb
+ movl %edi, %esi
+ sarl $31, %esi
+# mask each limb of p with the sign mask
+ movl 40(%esp), %eax
+ movl 12(%eax), %ebp
+ andl %esi, %ebp
+ movl 8(%eax), %ecx
+ andl %esi, %ecx
+ movl 40(%esp), %eax
+ movl 4(%eax), %eax
+ andl %esi, %eax
+ movl 40(%esp), %ebx
+ andl (%ebx), %esi
+# z = d + (p & mask)
+ addl (%esp), %esi # 4-byte Folded Reload
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl 28(%esp), %ebx
+ movl %esi, (%ebx)
+ adcl %edx, %ecx
+ movl %eax, 4(%ebx)
+ movl %ecx, 8(%ebx)
+ adcl %edi, %ebp
+ movl %ebp, 12(%ebx)
+ addl $8, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end62:
+ .size mcl_fp_subNF4L, .Lfunc_end62-mcl_fp_subNF4L
+
+# ---------------------------------------------------------------------------
+# mcl_fpDbl_add4L(z, x, y, p) — i386 cdecl.
+# Double-width (8-limb) addition: z[0..7] = x[0..7] + y[0..7]. The low
+# 4 limbs are stored as-is; the high 4 limbs get a conditional
+# reduction by p (trial subtraction, selected per limb with jne guards
+# on the combined carry/borrow flag in %ebx).
+# After 4 pushes + subl $12: 32=z, 36=x, 40=y, 44=p (all %esp-relative).
+# ---------------------------------------------------------------------------
+ .globl mcl_fpDbl_add4L
+ .align 16, 0x90
+ .type mcl_fpDbl_add4L,@function
+mcl_fpDbl_add4L: # @mcl_fpDbl_add4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $12, %esp
+ movl 40(%esp), %eax
+ movl (%eax), %edi
+ movl 4(%eax), %edx
+ movl 36(%esp), %esi
+ addl (%esi), %edi
+ adcl 4(%esi), %edx
+ movl 8(%eax), %ebx
+ adcl 8(%esi), %ebx
+ movl 12(%esi), %ebp
+# low half stored directly while the carry chain continues upward
+ movl 32(%esp), %ecx
+ movl %edi, (%ecx)
+ movl 16(%esi), %edi
+ adcl 12(%eax), %ebp
+ adcl 16(%eax), %edi
+ movl %edx, 4(%ecx)
+ movl 28(%eax), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %ebx, 8(%ecx)
+ movl 24(%eax), %ebx
+ movl 20(%eax), %eax
+ movl %ebp, 12(%ecx)
+ movl 20(%esi), %edx
+ adcl %eax, %edx
+ movl 28(%esi), %ecx
+ movl 24(%esi), %ebp
+ adcl %ebx, %ebp
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+# %ebx = carry out of the high half (0 or 1)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+# trial subtraction of p from the high 4 limbs
+ movl 44(%esp), %eax
+ movl %edi, %esi
+ subl (%eax), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl %edx, %esi
+ sbbl 4(%eax), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ sbbl 8(%eax), %esi
+ sbbl 12(%eax), %ecx
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB63_2
+# BB#1:
+ movl %esi, %ebp
+.LBB63_2:
+ testb %bl, %bl
+ jne .LBB63_4
+# BB#3:
+ movl (%esp), %edi # 4-byte Reload
+.LBB63_4:
+ movl 32(%esp), %eax
+ movl %edi, 16(%eax)
+ jne .LBB63_6
+# BB#5:
+ movl 4(%esp), %edx # 4-byte Reload
+.LBB63_6:
+ movl %edx, 20(%eax)
+ movl %ebp, 24(%eax)
+ movl 8(%esp), %edx # 4-byte Reload
+ jne .LBB63_8
+# BB#7:
+ movl %ecx, %edx
+.LBB63_8:
+ movl %edx, 28(%eax)
+ addl $12, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end63:
+ .size mcl_fpDbl_add4L, .Lfunc_end63-mcl_fpDbl_add4L
+
+# ---------------------------------------------------------------------------
+# mcl_fpDbl_sub4L(z, x, y, p) — i386 cdecl.
+# Double-width (8-limb) subtraction: z[0..7] = x[0..7] - y[0..7]. The
+# low 4 limbs are stored as-is; the high 4 limbs are corrected by
+# adding p when the full subtraction borrowed (limbs of p or zeros are
+# selected branch-by-branch before a final add/adc chain).
+# After 4 pushes + pushl %eax: 24=z, 28=x, 32=y, 36=p (all %esp-rel).
+# ---------------------------------------------------------------------------
+ .globl mcl_fpDbl_sub4L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub4L,@function
+mcl_fpDbl_sub4L: # @mcl_fpDbl_sub4L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ pushl %eax
+ movl 28(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 32(%esp), %ebp
+ subl (%ebp), %edx
+ sbbl 4(%ebp), %esi
+ movl 8(%eax), %ebx
+ sbbl 8(%ebp), %ebx
+# low half stored while the borrow chain continues upward
+ movl 24(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 12(%eax), %edx
+ sbbl 12(%ebp), %edx
+ movl %esi, 4(%ecx)
+ movl 16(%eax), %edi
+ sbbl 16(%ebp), %edi
+ movl %ebx, 8(%ecx)
+ movl 20(%ebp), %esi
+ movl %edx, 12(%ecx)
+ movl 20(%eax), %ebx
+ sbbl %esi, %ebx
+ movl 24(%ebp), %edx
+ movl 24(%eax), %esi
+ sbbl %edx, %esi
+ movl 28(%ebp), %edx
+ movl 28(%eax), %eax
+ sbbl %edx, %eax
+ movl %eax, (%esp) # 4-byte Spill
+# %edx = overall borrow (0 or 1); selects p vs. 0 for the correction
+ movl $0, %edx
+ sbbl $0, %edx
+ andl $1, %edx
+ movl 36(%esp), %ecx
+ movl (%ecx), %eax
+ jne .LBB64_1
+# BB#2:
+ xorl %ebp, %ebp
+ jmp .LBB64_3
+.LBB64_1:
+ movl 4(%ecx), %ebp
+.LBB64_3:
+ testb %dl, %dl
+ jne .LBB64_5
+# BB#4:
+ movl $0, %eax
+.LBB64_5:
+ jne .LBB64_6
+# BB#7:
+ movl $0, %edx
+ jmp .LBB64_8
+.LBB64_6:
+ movl 12(%ecx), %edx
+.LBB64_8:
+ jne .LBB64_9
+# BB#10:
+ xorl %ecx, %ecx
+ jmp .LBB64_11
+.LBB64_9:
+ movl 8(%ecx), %ecx
+.LBB64_11:
+# high half: z[4..7] = raw difference + (borrow ? p : 0)
+ addl %edi, %eax
+ adcl %ebx, %ebp
+ movl 24(%esp), %edi
+ movl %eax, 16(%edi)
+ adcl %esi, %ecx
+ movl %ebp, 20(%edi)
+ movl %ecx, 24(%edi)
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%edi)
+ addl $4, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end64:
+ .size mcl_fpDbl_sub4L, .Lfunc_end64-mcl_fpDbl_sub4L
+
+# ---------------------------------------------------------------------------
+# mcl_fp_mulUnitPre5L(z, x, y) — i386 cdecl.
+# Multiplies a 5-limb value by one 32-bit word: z[0..5] = x[0..4] * y.
+# All five 32x32->64 products are computed first (mull, results spilled),
+# then combined in one add/adc pass; the final carried-out high word
+# lands in z[5].
+# After 4 pushes + subl $20: 40=z, 44=x, 48=y (scalar) (all %esp-rel).
+# ---------------------------------------------------------------------------
+ .globl mcl_fp_mulUnitPre5L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre5L,@function
+mcl_fp_mulUnitPre5L: # @mcl_fp_mulUnitPre5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 48(%esp), %esi
+ movl 44(%esp), %ecx
+# five mull's: y * x[4] .. y * x[0]; lows spilled, highs held in regs
+ movl %esi, %eax
+ mull 16(%ecx)
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 12(%ecx)
+ movl %edx, %ebx
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 8(%ecx)
+ movl %edx, %edi
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 4(%ecx)
+ movl %edx, %ebp
+ movl %eax, (%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull (%ecx)
+# combine: each high word + next low word, carry rippling to z[5]
+ movl 40(%esp), %ecx
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%ecx)
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%ecx)
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%ecx)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 20(%ecx)
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end65:
+ .size mcl_fp_mulUnitPre5L, .Lfunc_end65-mcl_fp_mulUnitPre5L
+
+# ---------------------------------------------------------------------------
+# mcl_fpDbl_mulPre5L(z, x, y) — i386 cdecl.
+# Full 5x5-limb schoolbook multiplication: z[0..9] = x[0..4] * y[0..4],
+# no reduction. Organized as one column pass per word of y: each pass
+# computes the five products x[i]*y[j] with mull, then folds them into
+# the running accumulator row with add/adc chains, emitting one output
+# limb (z[j]) per pass and carrying the rest forward in registers and
+# spill slots.
+# After 4 pushes + subl $64: 84=z, 88=x, 92=y (all %esp-relative).
+# ---------------------------------------------------------------------------
+ .globl mcl_fpDbl_mulPre5L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre5L,@function
+mcl_fpDbl_mulPre5L: # @mcl_fpDbl_mulPre5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $64, %esp
+# passes for y[0] and y[1]: products computed up front, z[0], z[1] emitted
+ movl 88(%esp), %esi
+ movl (%esi), %ebp
+ movl 92(%esp), %eax
+ movl (%eax), %ebx
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull %ebx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 84(%esp), %edx
+ movl %eax, (%edx)
+ movl %esi, %eax
+ movl 4(%eax), %esi
+ movl 8(%eax), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 12(%eax), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 16(%eax), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 4(%edi), %edi
+ movl %esi, %eax
+ mull %edi
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %edi
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl (%esp), %esi # 4-byte Reload
+ movl %esi, %eax
+ mull %edi
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebx
+ movl %edx, %esi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %eax, %edi
+ movl %edx, %ebx
+# fold the x[]*y[0] column, then add the x[]*y[1] column on top
+ addl 36(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 24(%esp), %ebp # 4-byte Folded Reload
+ movl 84(%esp), %eax
+ movl %ebp, 4(%eax)
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ addl 52(%esp), %edi # 4-byte Folded Reload
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+# pass for y[2]: z[2] emitted
+ movl 88(%esp), %eax
+ movl %eax, %esi
+ movl 16(%esi), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl 8(%eax), %ecx
+ movl %edx, %eax
+ mull %ecx
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %esi, %edx
+ movl 8(%edx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ movl (%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 4(%eax), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ mull %ecx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ecx
+ addl %edi, %eax
+ movl 84(%esp), %ecx
+ movl %eax, 8(%ecx)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl %ebx, %ecx
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl %ebp, %eax
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ addl %edx, %ecx
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+# pass for y[3]: z[3] emitted
+ movl 92(%esp), %eax
+ movl 12(%eax), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ mull %ebp
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ mull %ebp
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ mull %ebp
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 24(%esp), %eax # 4-byte Reload
+ mull %ebp
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ mull %ebp
+ movl %edx, 24(%esp) # 4-byte Spill
+ addl %ecx, %eax
+ movl 84(%esp), %edx
+ movl %eax, 12(%edx)
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 60(%esp) # 4-byte Folded Spill
+# pass for y[4]: z[4] emitted, remaining carries flushed to z[5..9]
+ movl 92(%esp), %eax
+ movl 16(%eax), %ebp
+ sbbl %ecx, %ecx
+ movl %ebp, %eax
+ movl 88(%esp), %esi
+ mull 16(%esi)
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 12(%esi)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 8(%esi)
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 4(%esi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull (%esi)
+ movl %eax, %ebp
+ andl $1, %ecx
+ addl 24(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ addl %ebp, %edi
+ movl 84(%esp), %ebp
+ movl %edi, 16(%ebp)
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %eax, %edi
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl %edx, %ebx
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %ebp, %edx
+ movl %ebx, 20(%edx)
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 24(%edx)
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 28(%edx)
+ movl %ecx, 32(%edx)
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%edx)
+ addl $64, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end66:
+ .size mcl_fpDbl_mulPre5L, .Lfunc_end66-mcl_fpDbl_mulPre5L
+
+# ---------------------------------------------------------------------------
+# mcl_fpDbl_sqrPre5L(z, x) — i386 cdecl.
+# Full squaring of a 5-limb value: z[0..9] = x[0..4]^2, no reduction.
+# Structured like mulPre5L's column passes (one pass per x[j] as the
+# multiplier), computing each needed x[i]*x[j] product with mull and
+# folding it into the accumulator with add/adc chains; one output limb
+# is emitted per pass. (Cross products are recomputed rather than
+# doubled — the pass structure mirrors the generic multiply.)
+# After 4 pushes + subl $60: 80=z, 84=x (all %esp-relative).
+# ---------------------------------------------------------------------------
+ .globl mcl_fpDbl_sqrPre5L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre5L,@function
+mcl_fpDbl_sqrPre5L: # @mcl_fpDbl_sqrPre5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+# passes for x[0] and x[1]: z[0], z[1] emitted
+ movl 84(%esp), %ebx
+ movl 16(%ebx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl (%ebx), %edi
+ movl 4(%ebx), %ecx
+ mull %ecx
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 12(%ebx), %esi
+ movl %esi, %eax
+ mull %ecx
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 8(%ebx), %ebx
+ movl %ebx, %eax
+ mull %ecx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %edi
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %edi
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull %ecx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %edi, %eax
+ mull %edi
+ movl 80(%esp), %edi
+ movl %eax, (%edi)
+ addl %ecx, %edx
+ adcl %esi, %ebp
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+# second accumulation: x[0]*x[1] added again plus the x[]*x[1] column
+ addl %ecx, %edx
+ movl 80(%esp), %ecx
+ movl %edx, 4(%ecx)
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %esi, %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ sbbl %esi, %esi
+ andl $1, %esi
+ addl (%esp), %ebp # 4-byte Folded Reload
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+# pass for x[2] (in %ebx): z[2] emitted
+ movl 44(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx
+ movl 12(%ecx), %edi
+ movl %edi, %eax
+ mull %ebx
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl (%eax), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 4(%eax), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ mull %ebx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %ebx, %eax
+ mull %ebx
+ movl %eax, 44(%esp) # 4-byte Spill
+ addl %ebp, %ecx
+ movl 80(%esp), %eax
+ movl %ecx, 8(%eax)
+ movl 32(%esp), %ebx # 4-byte Reload
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %esi, %eax
+ sbbl %esi, %esi
+ andl $1, %esi
+ addl 8(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 32(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl %edx, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+# pass for x[3] (in %edi): z[3] emitted
+ movl 84(%esp), %eax
+ movl 8(%eax), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 16(%eax), %ebx
+ movl %ebx, %eax
+ mull %edi
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl 28(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl 24(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %edi, %eax
+ mull %edi
+ movl %eax, %edi
+ movl %edx, 24(%esp) # 4-byte Spill
+ addl 32(%esp), %ecx # 4-byte Folded Reload
+ movl 80(%esp), %eax
+ movl %ecx, 12(%eax)
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ sbbl %ecx, %ecx
+# pass for x[4] (in %ebx): z[4] emitted, carries flushed to z[5..9]
+ movl %ebx, %eax
+ movl 84(%esp), %edx
+ mull 12(%edx)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl 84(%esp), %edx
+ mull 4(%edx)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl 84(%esp), %edx
+ mull (%edx)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ebx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %eax, 20(%esp) # 4-byte Spill
+ andl $1, %ecx
+ addl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ addl (%esp), %ebp # 4-byte Folded Reload
+ movl 80(%esp), %ebx
+ movl %ebp, 16(%ebx)
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %eax, %ebp
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 12(%esp), %esi # 4-byte Folded Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 20(%ebx)
+ adcl %edx, %ebp
+ movl %edi, 24(%ebx)
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ebp, 28(%ebx)
+ movl %ecx, 32(%ebx)
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end67:
+ .size mcl_fpDbl_sqrPre5L, .Lfunc_end67-mcl_fpDbl_sqrPre5L
+
+ .globl mcl_fp_mont5L
+ .align 16, 0x90
+ .type mcl_fp_mont5L,@function
+mcl_fp_mont5L: # @mcl_fp_mont5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $112, %esp
+ movl 136(%esp), %ebx
+ movl (%ebx), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 140(%esp), %ecx
+ movl (%ecx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 144(%esp), %esi
+ movl -4(%esi), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ imull %edx, %ecx
+ movl (%esi), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 16(%esi), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 4(%esi), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 4(%ebx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 16(%ebx), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 12(%ebx), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 8(%ebx), %ebx
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %esi
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ movl 32(%esp), %ecx # 4-byte Reload
+ mull %ecx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %ecx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ecx
+ movl %edx, %ebp
+ movl %eax, (%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, %ebx
+ movl %eax, %edi
+ addl 40(%esp), %edi # 4-byte Folded Reload
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 60(%esp) # 4-byte Folded Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ addl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 16(%esp), %ebp # 4-byte Reload
+ addl 36(%esp), %ebp # 4-byte Folded Reload
+ adcl %edi, 40(%esp) # 4-byte Folded Spill
+ adcl %ebx, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 140(%esp), %eax
+ movl 4(%eax), %edi
+ movl %edi, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebp
+ movl %edi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %eax, %ebx
+ addl %ebp, %edx
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 36(%esp) # 4-byte Spill
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %ebx, %ebp
+ imull 96(%esp), %ebp # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ addl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %ecx, %edx
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 36(%esp), %eax # 4-byte Folded Reload
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl $0, %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 140(%esp), %eax
+ movl 8(%eax), %ebx
+ movl %ebx, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %eax, %esi
+ addl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %ebp, %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl %edi, 48(%esp) # 4-byte Folded Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %esi, %ebp
+ imull 96(%esp), %ebp # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ addl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %edi, %edx
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl 40(%esp), %eax # 4-byte Folded Reload
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl $0, %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 140(%esp), %eax
+ movl 12(%eax), %edi
+ movl %edi, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %eax, %ebx
+ addl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %ebp, %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl %esi, 48(%esp) # 4-byte Folded Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %ebx, %ebp
+ imull 96(%esp), %ebp # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ addl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %ecx, %edx
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 40(%esp), %eax # 4-byte Folded Reload
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl $0, %ebx
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 140(%esp), %eax
+ movl 16(%eax), %ebx
+ movl %ebx, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ addl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %ebx # 4-byte Reload
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %esi, %eax
+ adcl $0, %eax
+ movl 84(%esp), %esi # 4-byte Reload
+ addl %ebp, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ adcl %edi, 80(%esp) # 4-byte Folded Spill
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 76(%esp) # 4-byte Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %ebx, %ebx
+ movl 96(%esp), %ecx # 4-byte Reload
+ imull %esi, %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ andl $1, %ebx
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ mull 88(%esp) # 4-byte Folded Reload
+ addl 48(%esp), %eax # 4-byte Folded Reload
+ adcl %ecx, %edx
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 84(%esp), %ecx # 4-byte Folded Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ adcl 72(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 96(%esp) # 4-byte Spill
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %ebx
+ movl %eax, %ecx
+ subl 100(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl %edx, %ecx
+ sbbl 88(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ sbbl 104(%esp), %ecx # 4-byte Folded Reload
+ sbbl 108(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ sbbl 92(%esp), %ebp # 4-byte Folded Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB68_2
+# BB#1:
+ movl 88(%esp), %edx # 4-byte Reload
+.LBB68_2:
+ testb %bl, %bl
+ jne .LBB68_4
+# BB#3:
+ movl 100(%esp), %eax # 4-byte Reload
+.LBB68_4:
+ movl 132(%esp), %ebx
+ movl %eax, (%ebx)
+ movl %edx, 4(%ebx)
+ jne .LBB68_6
+# BB#5:
+ movl %ecx, %esi
+.LBB68_6:
+ movl %esi, 8(%ebx)
+ movl 96(%esp), %eax # 4-byte Reload
+ jne .LBB68_8
+# BB#7:
+ movl 108(%esp), %eax # 4-byte Reload
+.LBB68_8:
+ movl %eax, 12(%ebx)
+ jne .LBB68_10
+# BB#9:
+ movl %ebp, %edi
+.LBB68_10:
+ movl %edi, 16(%ebx)
+ addl $112, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end68:
+ .size mcl_fp_mont5L, .Lfunc_end68-mcl_fp_mont5L
+
+# ---------------------------------------------------------------------------
+# void mcl_fp_montNF5L(uint32_t* z, const uint32_t* x, const uint32_t* y,
+#                      const uint32_t* p)
+# 5-limb (160-bit) Montgomery multiplication, "NF" variant:
+# z = x * y * R^-1 mod p with R = 2^32^5.  The final correction selects
+# between t and t - p by the SIGN of t - p (sarl/js) rather than a borrow
+# flag, hence "no final subtraction" in the usual carry sense.
+# ABI: i386 cdecl.  After the 4 register pushes and the $104 frame, the
+# arguments sit at 124(%esp)=z, 128(%esp)=x, 132(%esp)=y, 136(%esp)=p;
+# -4(p) is read as the Montgomery constant p0inv = -p[0]^-1 mod 2^32
+# (mcl parameter layout -- NOTE(review): confirm against mcl headers).
+# Compiler-generated (LLVM).  The mull/addl/adcl sequences form exact
+# carry chains; do not reorder or insert flag-clobbering instructions.
+# ---------------------------------------------------------------------------
+ .globl mcl_fp_montNF5L
+ .align 16, 0x90
+ .type mcl_fp_montNF5L,@function
+mcl_fp_montNF5L: # @mcl_fp_montNF5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $104, %esp
+# i = 0: t = x * y[0]; also cache x[0..4], p[0..4] and p0inv as spills
+ movl 128(%esp), %ebx
+ movl (%ebx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx
+ movl (%ecx), %ecx
+ mull %ecx
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 136(%esp), %esi
+ movl -4(%esi), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, %edi
+ imull %edx, %edi
+ movl (%esi), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 16(%esi), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 4(%esi), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 4(%ebx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 16(%ebx), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 12(%ebx), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 8(%ebx), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+# m = t0 * p0inv (in %edi); compute m * p limb products
+ movl %edi, %eax
+ mull %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %ecx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ecx
+ movl %edx, %edi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ecx
+ movl %edx, %ebp
+ movl %eax, %ebx
+ movl 76(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, %ecx
+ movl %eax, %esi
+# fold the x*y[0] partial products, then add m*p and shift down one limb
+ addl 68(%esp), %esi # 4-byte Folded Reload
+ adcl %ebx, %ecx
+ adcl (%esp), %ebp # 4-byte Folded Reload
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 8(%esp), %edx # 4-byte Reload
+ addl 48(%esp), %edx # 4-byte Folded Reload
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %eax
+ addl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+# i = 1: t += x * y[1]
+ movl 132(%esp), %eax
+ movl 4(%eax), %ebx
+ movl %ebx, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 56(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ addl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %esi, 28(%esp) # 4-byte Folded Spill
+ adcl %ecx, 32(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl %ebp, %ecx
+ adcl %edi, %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+# reduce: m = t0 * p0inv; t = (t + m*p) / 2^32
+ movl 28(%esp), %esi # 4-byte Reload
+ movl %esi, %edi
+ imull 84(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %edi, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ addl %esi, %eax
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ adcl %ecx, %ebp
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %edx, %ebx
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+# i = 2: t += x * y[2]
+ movl 132(%esp), %eax
+ movl 8(%eax), %ecx
+ movl %ecx, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 56(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ addl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 32(%esp), %edx # 4-byte Reload
+ addl %ebx, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+# reduce
+ movl %edx, %ebx
+ imull 84(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %ebx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ addl 32(%esp), %eax # 4-byte Folded Reload
+ adcl %ebp, %ecx
+ movl 40(%esp), %ebx # 4-byte Reload
+ adcl %edi, %ebx
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl %esi, %edi
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %edx, %ecx
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+# i = 3: t += x * y[3]
+ movl 132(%esp), %eax
+ movl 12(%eax), %edi
+ movl %edi, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 56(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ addl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl %ecx, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+# reduce
+ movl %eax, %ecx
+ imull 84(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ addl 20(%esp), %eax # 4-byte Folded Reload
+ movl %edi, %edx
+ adcl %ebp, %edx
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl %ebx, %edi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl %esi, %ecx
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+# i = 4: t += x * y[4]
+ movl 132(%esp), %eax
+ movl 16(%eax), %ecx
+ movl %ecx, %eax
+ mull 52(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 56(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 76(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ addl 76(%esp), %edx # 4-byte Folded Reload
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ adcl $0, %edi
+ addl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl $0, %edi
+# final reduction round
+ movl 84(%esp), %ecx # 4-byte Reload
+ imull %eax, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ mull 88(%esp) # 4-byte Folded Reload
+ addl 40(%esp), %ebx # 4-byte Folded Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %edi
+ addl 52(%esp), %eax # 4-byte Folded Reload
+ adcl %edx, %ecx
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 84(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebp # 4-byte Folded Reload
+ adcl 76(%esp), %edi # 4-byte Folded Reload
+# conditional subtraction: compute t - p; sign of the top word selects t or t-p
+ movl %eax, %ebx
+ subl 100(%esp), %ebx # 4-byte Folded Reload
+ movl %ecx, %edx
+ sbbl 88(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ sbbl 92(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ sbbl 96(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl %edi, %edx
+ movl %edi, %esi
+ sbbl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %edx
+ sarl $31, %edx
+ testl %edx, %edx
+ js .LBB69_2
+# BB#1:
+ movl %ebx, %eax
+.LBB69_2:
+ movl 124(%esp), %edx
+ movl %eax, (%edx)
+ js .LBB69_4
+# BB#3:
+ movl 88(%esp), %ecx # 4-byte Reload
+.LBB69_4:
+ movl %ecx, 4(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ js .LBB69_6
+# BB#5:
+ movl 92(%esp), %eax # 4-byte Reload
+.LBB69_6:
+ movl %eax, 8(%edx)
+ js .LBB69_8
+# BB#7:
+ movl 100(%esp), %ebp # 4-byte Reload
+.LBB69_8:
+ movl %ebp, 12(%edx)
+ js .LBB69_10
+# BB#9:
+ movl %edi, %esi
+.LBB69_10:
+ movl %esi, 16(%edx)
+ addl $104, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end69:
+ .size mcl_fp_montNF5L, .Lfunc_end69-mcl_fp_montNF5L
+
+# ---------------------------------------------------------------------------
+# void mcl_fp_montRed5L(uint32_t* z, uint32_t* xy, const uint32_t* p)
+# Montgomery reduction of a 10-limb (320-bit) value:
+# z = xy * R^-1 mod p with R = 2^32^5.
+# ABI: i386 cdecl.  After the 4 register pushes and the $88 frame the
+# arguments sit at 108(%esp)=z, 112(%esp)=xy, 116(%esp)=p; -4(p) is read
+# as p0inv = -p[0]^-1 mod 2^32 (mcl layout -- NOTE(review): confirm).
+# Structure: five rounds of  m = t0*p0inv;  t = (t + m*p) >> 32,  each
+# also folding in the next high limb of xy, then a single conditional
+# subtraction of p keyed on the saved carry byte.  Compiler-generated
+# (LLVM); the adcl/sbbl ordering is a carry chain -- do not reorder.
+# ---------------------------------------------------------------------------
+ .globl mcl_fp_montRed5L
+ .align 16, 0x90
+ .type mcl_fp_montRed5L,@function
+mcl_fp_montRed5L: # @mcl_fp_montRed5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+# cache p0inv, p[0..4]; round 0: m = xy[0]*p0inv, add m*p, absorb xy[1..5]
+ movl 116(%esp), %eax
+ movl -4(%eax), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl (%eax), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 112(%esp), %esi
+ movl (%esi), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ imull %edx, %ecx
+ movl 16(%eax), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 12(%eax), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 8(%eax), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 4(%eax), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebp
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %edx, %ebp
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ addl %edi, %ebx
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 80(%esp), %eax # 4-byte Folded Reload
+ adcl 4(%esi), %ebx
+ adcl 8(%esi), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ adcl 12(%esi), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl 16(%esi), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 20(%esi), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+# carry-propagate into the remaining high limbs xy[6..9]; save carry byte
+ movl 36(%esi), %eax
+ movl 32(%esi), %ecx
+ movl 28(%esi), %edx
+ movl 24(%esi), %esi
+ adcl $0, %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+# round 1
+ movl %ebx, %ecx
+ imull 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %eax, %edi
+ addl %esi, %edx
+ movl %edx, %ebp
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %ebx, %edi
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 12(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, 20(%esp) # 4-byte Folded Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+# round 2
+ movl %ebp, %esi
+ imull 76(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %ebx
+ movl %esi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ addl %ebx, %ebp
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 16(%esp), %eax # 4-byte Folded Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+# round 3
+ movl %ebp, %edi
+ imull 76(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 60(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ addl 8(%esp), %ebx # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl %ebp, 16(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+# round 4 (final)
+ movl 76(%esp), %esi # 4-byte Reload
+ imull %ebx, %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 72(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 68(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 64(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ mull 60(%esp) # 4-byte Folded Reload
+ addl 24(%esp), %eax # 4-byte Folded Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ adcl $0, %ebp
+ addl %ebx, %esi
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 76(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 80(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+# conditional subtraction: t - p; saved carry byte in %ebx selects t or t-p
+ movl %eax, %esi
+ subl 84(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl %edx, %esi
+ sbbl 60(%esp), %esi # 4-byte Folded Reload
+ sbbl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ sbbl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ movl %edi, %ecx
+ sbbl 72(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 84(%esp) # 4-byte Spill
+ sbbl $0, %ebx
+ andl $1, %ebx
+ movl %ebx, 80(%esp) # 4-byte Spill
+ jne .LBB70_2
+# BB#1:
+ movl %esi, %edx
+.LBB70_2:
+ movl 80(%esp), %ebx # 4-byte Reload
+ testb %bl, %bl
+ jne .LBB70_4
+# BB#3:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB70_4:
+ movl 108(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edx, 4(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ jne .LBB70_6
+# BB#5:
+ movl %ebp, %eax
+.LBB70_6:
+ movl %eax, 8(%ecx)
+ movl 76(%esp), %eax # 4-byte Reload
+ jne .LBB70_8
+# BB#7:
+ movl 68(%esp), %eax # 4-byte Reload
+.LBB70_8:
+ movl %eax, 12(%ecx)
+ jne .LBB70_10
+# BB#9:
+ movl 84(%esp), %edi # 4-byte Reload
+.LBB70_10:
+ movl %edi, 16(%ecx)
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end70:
+ .size mcl_fp_montRed5L, .Lfunc_end70-mcl_fp_montRed5L
+
+# ---------------------------------------------------------------------------
+# uint32_t mcl_fp_addPre5L(uint32_t* z, const uint32_t* a, const uint32_t* b)
+# Plain 5-limb addition (no modular reduction): z = a + b.
+# Returns the carry-out (0 or 1) in %eax via the sbbl/andl idiom.
+# ABI: i386 cdecl; after the 4 pushes: 20(%esp)=z, 24/28(%esp)=operands
+# (addition is commutative, so operand order is immaterial).
+# The addl/adcl sequence is one unbroken carry chain -- do not reorder.
+# ---------------------------------------------------------------------------
+ .globl mcl_fp_addPre5L
+ .align 16, 0x90
+ .type mcl_fp_addPre5L,@function
+mcl_fp_addPre5L: # @mcl_fp_addPre5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 24(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 8(%eax), %edi
+ adcl 8(%esi), %edi
+ movl 12(%esi), %ebx
+ movl 16(%esi), %esi
+ adcl 12(%eax), %ebx
+ movl 16(%eax), %eax
+ movl 20(%esp), %ebp
+ movl %ecx, (%ebp)
+ movl %edx, 4(%ebp)
+ movl %edi, 8(%ebp)
+ movl %ebx, 12(%ebp)
+ adcl %esi, %eax
+ movl %eax, 16(%ebp)
+# materialize the final carry flag as 0/1 in %eax (return value)
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end71:
+ .size mcl_fp_addPre5L, .Lfunc_end71-mcl_fp_addPre5L
+
+# ---------------------------------------------------------------------------
+# uint32_t mcl_fp_subPre5L(uint32_t* z, const uint32_t* a, const uint32_t* b)
+# Plain 5-limb subtraction (no modular reduction): z = a - b.
+# Returns the borrow-out (0 or 1) in %eax (zeroed up front, then
+# sbbl $0 / andl $1 captures the final borrow).
+# ABI: i386 cdecl; after the 2 pushes: 12(%esp)=z, 16(%esp)=a, 20(%esp)=b.
+# The subl/sbbl sequence is one unbroken borrow chain -- do not reorder.
+# ---------------------------------------------------------------------------
+ .globl mcl_fp_subPre5L
+ .align 16, 0x90
+ .type mcl_fp_subPre5L,@function
+mcl_fp_subPre5L: # @mcl_fp_subPre5L
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %ecx
+ movl (%ecx), %edx
+ xorl %eax, %eax
+ movl 20(%esp), %esi
+ subl (%esi), %edx
+ movl 12(%esp), %edi
+ movl %edx, (%edi)
+ movl 4(%ecx), %edx
+ sbbl 4(%esi), %edx
+ movl %edx, 4(%edi)
+ movl 8(%ecx), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, 8(%edi)
+ movl 12(%ecx), %edx
+ sbbl 12(%esi), %edx
+ movl %edx, 12(%edi)
+ movl 16(%esi), %edx
+ movl 16(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 16(%edi)
+# fold the final borrow into the pre-zeroed %eax -> return 0 or 1
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end72:
+ .size mcl_fp_subPre5L, .Lfunc_end72-mcl_fp_subPre5L
+
+# ---------------------------------------------------------------------------
+# void mcl_fp_shr1_5L(uint32_t* z, const uint32_t* a)
+# Logical right shift by one bit of a 5-limb value: z = a >> 1.
+# Each shrdl funnels the low bit of the next-higher limb into the top of
+# the current limb; the final shrl zero-fills the most significant limb.
+# ABI: i386 cdecl; after the 3 pushes: 16(%esp)=z, 20(%esp)=a.
+# ---------------------------------------------------------------------------
+ .globl mcl_fp_shr1_5L
+ .align 16, 0x90
+ .type mcl_fp_shr1_5L,@function
+mcl_fp_shr1_5L: # @mcl_fp_shr1_5L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 20(%esp), %eax
+ movl 16(%eax), %ecx
+ movl 12(%eax), %edx
+ movl 8(%eax), %esi
+ movl (%eax), %edi
+ movl 4(%eax), %eax
+ shrdl $1, %eax, %edi # z[0] = (a[1]:a[0]) >> 1 (low word)
+ movl 16(%esp), %ebx
+ movl %edi, (%ebx)
+ shrdl $1, %esi, %eax
+ movl %eax, 4(%ebx)
+ shrdl $1, %edx, %esi
+ movl %esi, 8(%ebx)
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%ebx)
+ shrl %ecx # top limb: shift in a zero bit
+ movl %ecx, 16(%ebx)
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end73:
+ .size mcl_fp_shr1_5L, .Lfunc_end73-mcl_fp_shr1_5L
+
+# ---------------------------------------------------------------------------
+# void mcl_fp_add5L(uint32_t* z, const uint32_t* a, const uint32_t* b,
+#                   const uint32_t* p)
+# Modular addition over 5 limbs: z = (a + b) mod p, assuming a, b < p
+# (mcl precondition -- NOTE(review): confirm against mcl headers).
+# Strategy: store the raw sum first, then trial-subtract p; if the
+# subtraction did not borrow (carry byte clear), overwrite z with the
+# reduced value, otherwise keep the raw sum (the "carry" path).
+# ABI: i386 cdecl; after the 4 pushes: 20(%esp)=z, 24/28(%esp)=operands,
+# 32(%esp)=p.  Unbroken adcl/sbbl carry chains -- do not reorder.
+# ---------------------------------------------------------------------------
+ .globl mcl_fp_add5L
+ .align 16, 0x90
+ .type mcl_fp_add5L,@function
+mcl_fp_add5L: # @mcl_fp_add5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 28(%esp), %ebx
+ movl (%ebx), %eax
+ movl 4(%ebx), %ecx
+ movl 24(%esp), %edi
+ addl (%edi), %eax
+ adcl 4(%edi), %ecx
+ movl 8(%ebx), %edx
+ adcl 8(%edi), %edx
+ movl 12(%edi), %esi
+ movl 16(%edi), %edi
+ adcl 12(%ebx), %esi
+ adcl 16(%ebx), %edi
+# store raw sum unconditionally
+ movl 20(%esp), %ebx
+ movl %eax, (%ebx)
+ movl %ecx, 4(%ebx)
+ movl %edx, 8(%ebx)
+ movl %esi, 12(%ebx)
+ movl %edi, 16(%ebx)
+# capture the addition carry, then trial-subtract p
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 32(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %ecx
+ sbbl 8(%ebp), %edx
+ sbbl 12(%ebp), %esi
+ sbbl 16(%ebp), %edi
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB74_2
+# BB#1: # %nocarry
+ movl 20(%esp), %ebx
+ movl %eax, (%ebx)
+ movl %ecx, 4(%ebx)
+ movl %edx, 8(%ebx)
+ movl %esi, 12(%ebx)
+ movl %edi, 16(%ebx)
+.LBB74_2: # %carry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end74:
+ .size mcl_fp_add5L, .Lfunc_end74-mcl_fp_add5L
+
+# ---------------------------------------------------------------------------
+# void mcl_fp_addNF5L(uint32_t* z, const uint32_t* a, const uint32_t* b,
+#                     const uint32_t* p)
+# Modular addition, "NF" variant: compute s = a + b and d = s - p, then
+# for each limb select s or d by the SIGN (sarl $31 / js) of d's top
+# borrow word, rather than by a carry flag.
+# ABI: i386 cdecl; after the 4 pushes and the $20 frame:
+# 40(%esp)=z, 44/48(%esp)=operands, 52(%esp)=p.
+# Unbroken adcl/sbbl carry chains -- do not reorder.
+# ---------------------------------------------------------------------------
+ .globl mcl_fp_addNF5L
+ .align 16, 0x90
+ .type mcl_fp_addNF5L,@function
+mcl_fp_addNF5L: # @mcl_fp_addNF5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+# s = a + b (5 limbs)
+ movl 48(%esp), %esi
+ movl (%esi), %ebx
+ movl 4(%esi), %eax
+ movl 44(%esp), %edi
+ addl (%edi), %ebx
+ adcl 4(%edi), %eax
+ movl 16(%esi), %ecx
+ movl 12(%esi), %edx
+ movl 8(%esi), %ebp
+ adcl 8(%edi), %ebp
+ adcl 12(%edi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 16(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+# d = s - p (5 limbs), spilled for later selection
+ movl 52(%esp), %edi
+ movl %ebx, %esi
+ subl (%edi), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl %eax, %esi
+ sbbl 4(%edi), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ sbbl 8(%edi), %esi
+ sbbl 12(%edi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ sbbl 16(%edi), %edx
+# broadcast the sign of the top borrow word; negative => keep s, else d
+ movl %edx, %edi
+ sarl $31, %edi
+ testl %edi, %edi
+ js .LBB75_2
+# BB#1:
+ movl (%esp), %ebx # 4-byte Reload
+.LBB75_2:
+ movl 40(%esp), %edi
+ movl %ebx, (%edi)
+ js .LBB75_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB75_4:
+ movl %eax, 4(%edi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ js .LBB75_6
+# BB#5:
+ movl %esi, %ebp
+.LBB75_6:
+ movl %ebp, 8(%edi)
+ movl 16(%esp), %eax # 4-byte Reload
+ js .LBB75_8
+# BB#7:
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB75_8:
+ movl %ecx, 12(%edi)
+ js .LBB75_10
+# BB#9:
+ movl %edx, %eax
+.LBB75_10:
+ movl %eax, 16(%edi)
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end75:
+ .size mcl_fp_addNF5L, .Lfunc_end75-mcl_fp_addNF5L
+
+ .globl mcl_fp_sub5L
+ .align 16, 0x90
+ .type mcl_fp_sub5L,@function
+#-----------------------------------------------------------------------
+# void mcl_fp_sub5L(u32 z[5], const u32 x[5], const u32 y[5], const u32 p[5])
+# ABI:   cdecl (i386).
+# z = x - y over 5 32-bit limbs; the raw difference is stored first.
+# If the subtraction borrowed (final sbbl into %ebx), p is added back
+# into z (carry path); otherwise the stored difference stands.
+# After the 4 pushes, args sit at 20/24/28/32(%esp).
+#-----------------------------------------------------------------------
+mcl_fp_sub5L: # @mcl_fp_sub5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %ecx
+ xorl %ebx, %ebx # %ebx collects the final borrow
+ movl 28(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %ecx
+ movl 8(%edi), %edx
+ sbbl 8(%ebp), %edx
+ movl 12(%edi), %esi
+ sbbl 12(%ebp), %esi
+ movl 16(%edi), %edi
+ sbbl 16(%ebp), %edi
+# store the raw difference unconditionally
+ movl 20(%esp), %ebp
+ movl %eax, (%ebp)
+ movl %ecx, 4(%ebp)
+ movl %edx, 8(%ebp)
+ movl %esi, 12(%ebp)
+ movl %edi, 16(%ebp)
+ sbbl $0, %ebx
+ testb $1, %bl
+ je .LBB76_2
+# BB#1: # %carry
+# borrow occurred: z += p
+ movl 32(%esp), %ebx
+ addl (%ebx), %eax
+ movl %eax, (%ebp)
+ adcl 4(%ebx), %ecx
+ movl %ecx, 4(%ebp)
+ adcl 8(%ebx), %edx
+ movl %edx, 8(%ebp)
+ movl 12(%ebx), %eax
+ adcl %esi, %eax
+ movl %eax, 12(%ebp)
+ movl 16(%ebx), %eax
+ adcl %edi, %eax
+ movl %eax, 16(%ebp)
+.LBB76_2: # %nocarry
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end76:
+ .size mcl_fp_sub5L, .Lfunc_end76-mcl_fp_sub5L
+
+ .globl mcl_fp_subNF5L
+ .align 16, 0x90
+ .type mcl_fp_subNF5L,@function
+#-----------------------------------------------------------------------
+# void mcl_fp_subNF5L(u32 z[5], const u32 x[5], const u32 y[5], const u32 p[5])
+# ABI:   cdecl (i386).
+# Branchless variant: d = x - y; mask = d_top >> 31 (all-ones iff the
+# result is negative); z = d + (p & mask).  The shldl/roll below only
+# recreate the same 0/all-ones mask in other registers (shifting or
+# rotating an all-zero/all-one word leaves it unchanged) — compiler
+# artifacts, not arithmetic.
+# After the 4 pushes + subl $16, args sit at 36/40/44/48(%esp).
+#-----------------------------------------------------------------------
+mcl_fp_subNF5L: # @mcl_fp_subNF5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $16, %esp
+# d = x - y (limbs spilled; %esi keeps the top limb)
+ movl 40(%esp), %edi
+ movl (%edi), %ecx
+ movl 4(%edi), %eax
+ movl 44(%esp), %ebx
+ subl (%ebx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ sbbl 4(%ebx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 16(%edi), %esi
+ movl 12(%edi), %eax
+ movl 8(%edi), %ecx
+ sbbl 8(%ebx), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ sbbl 12(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %esi
+# build mask = sign(d) replicated to 32 bits
+ movl %esi, %ebx
+ sarl $31, %ebx
+ movl %ebx, %ebp
+ shldl $1, %esi, %ebp # %ebp == mask (unchanged by the shift)
+# masked copy of p
+ movl 48(%esp), %edi
+ movl 4(%edi), %ecx
+ andl %ebp, %ecx
+ andl (%edi), %ebp
+ movl 16(%edi), %edx
+ andl %ebx, %edx
+ movl 12(%edi), %eax
+ andl %ebx, %eax
+ roll %ebx # mask unchanged by rotate
+ andl 8(%edi), %ebx
+# z = d + (p & mask)
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl 36(%esp), %edi
+ movl %ebp, (%edi)
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ movl %ecx, 4(%edi)
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 8(%edi)
+ movl %eax, 12(%edi)
+ adcl %esi, %edx
+ movl %edx, 16(%edi)
+ addl $16, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end77:
+ .size mcl_fp_subNF5L, .Lfunc_end77-mcl_fp_subNF5L
+
+ .globl mcl_fpDbl_add5L
+ .align 16, 0x90
+ .type mcl_fpDbl_add5L,@function
+#-----------------------------------------------------------------------
+# void mcl_fpDbl_add5L(u32 z[10], const u32 x[10], const u32 y[10],
+#                      const u32 p[5])
+# ABI:   cdecl (i386).
+# Double-width add: z = x + y over 10 limbs.  The low 5 limbs are
+# stored as computed; for the high 5 limbs a trial subtraction of p is
+# performed and kept only if it does not borrow (cmov-style selection
+# via jne over the spilled values).
+# After the 4 pushes + subl $28, args sit at 48/52/56/60(%esp).
+#-----------------------------------------------------------------------
+mcl_fpDbl_add5L: # @mcl_fpDbl_add5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+# low half: add and store limbs 0..4 directly into z
+ movl 56(%esp), %edx
+ movl 52(%esp), %ecx
+ movl 12(%ecx), %ebx
+ movl 16(%ecx), %ebp
+ movl 8(%edx), %esi
+ movl (%edx), %edi
+ addl (%ecx), %edi
+ movl 48(%esp), %eax
+ movl %edi, (%eax)
+ movl 4(%edx), %edi
+ adcl 4(%ecx), %edi
+ adcl 8(%ecx), %esi
+ adcl 12(%edx), %ebx
+ adcl 16(%edx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl %edi, 4(%eax)
+ movl 28(%edx), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl %esi, 8(%eax)
+ movl 20(%edx), %esi
+ movl %ebx, 12(%eax)
+# high half: sum limbs 5..9, keep them in spills/regs
+ movl 20(%ecx), %ebp
+ adcl %esi, %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 24(%edx), %esi
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%eax)
+ movl 24(%ecx), %ebx
+ adcl %esi, %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 28(%ecx), %edi
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ movl 32(%ecx), %esi
+ adcl %eax, %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 36(%edx), %eax
+ movl 36(%ecx), %edx
+ adcl %eax, %edx
+ sbbl %eax, %eax # %eax = -carry out of the 10-limb add
+ andl $1, %eax
+# trial subtraction of p from the high half
+ movl %ebp, %ecx
+ movl 60(%esp), %ebp
+ subl (%ebp), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ sbbl 4(%ebp), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ sbbl 8(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl %esi, %ebx
+ movl %edx, %esi
+ sbbl 12(%ebp), %ebx
+ sbbl 16(%ebp), %edx
+ sbbl $0, %eax
+ andl $1, %eax # %eax != 0 -> trial subtraction borrowed, keep sums
+ jne .LBB78_2
+# BB#1:
+ movl %edx, %esi
+.LBB78_2:
+ testb %al, %al
+ movl 12(%esp), %ebp # 4-byte Reload
+ jne .LBB78_4
+# BB#3:
+ movl (%esp), %ebp # 4-byte Reload
+.LBB78_4:
+# store the selected high half to z[5..9]
+ movl 48(%esp), %eax
+ movl %ebp, 20(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl 20(%esp), %edx # 4-byte Reload
+ movl 16(%esp), %edi # 4-byte Reload
+ jne .LBB78_6
+# BB#5:
+ movl 4(%esp), %edi # 4-byte Reload
+.LBB78_6:
+ movl %edi, 24(%eax)
+ jne .LBB78_8
+# BB#7:
+ movl 8(%esp), %edx # 4-byte Reload
+.LBB78_8:
+ movl %edx, 28(%eax)
+ jne .LBB78_10
+# BB#9:
+ movl %ebx, %ecx
+.LBB78_10:
+ movl %ecx, 32(%eax)
+ movl %esi, 36(%eax)
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end78:
+ .size mcl_fpDbl_add5L, .Lfunc_end78-mcl_fpDbl_add5L
+
+ .globl mcl_fpDbl_sub5L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub5L,@function
+#-----------------------------------------------------------------------
+# void mcl_fpDbl_sub5L(u32 z[10], const u32 x[10], const u32 y[10],
+#                      const u32 p[5])
+# ABI:   cdecl (i386).
+# Double-width subtract: z = x - y over 10 limbs.  Low 5 limbs are
+# stored directly; if the full subtraction borrowed, p is added to the
+# high 5 limbs before storing (each p limb is loaded or replaced by 0
+# depending on the borrow flag kept in %dl).
+# After the 4 pushes + subl $16, args sit at 36/40/44/48(%esp).
+#-----------------------------------------------------------------------
+mcl_fpDbl_sub5L: # @mcl_fpDbl_sub5L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $16, %esp
+# low half: subtract and store limbs 0..4 into z
+ movl 40(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %edi
+ movl 44(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%eax), %ebx
+ sbbl 8(%edx), %ebx
+ movl 36(%esp), %ecx
+ movl %esi, (%ecx)
+ movl 12(%eax), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ecx)
+ movl 16(%eax), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ecx)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ecx)
+# high half: subtract limbs 5..9 into spills/regs
+ movl 20(%eax), %esi
+ sbbl %ebx, %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 24(%edx), %esi
+ movl %edi, 16(%ecx)
+ movl 24(%eax), %ebp
+ sbbl %esi, %ebp
+ movl 28(%edx), %esi
+ movl 28(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, (%esp) # 4-byte Spill
+ movl 32(%edx), %esi
+ movl 32(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 36(%edx), %edx
+ movl 36(%eax), %eax
+ sbbl %edx, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl $0, %edx
+ sbbl $0, %edx
+ andl $1, %edx # %edx = 1 iff the whole subtraction borrowed
+# select p limbs (borrow) or zeros (no borrow) for the add-back
+ movl 48(%esp), %ebx
+ jne .LBB79_1
+# BB#2:
+ xorl %eax, %eax
+ jmp .LBB79_3
+.LBB79_1:
+ movl 16(%ebx), %eax
+.LBB79_3:
+ testb %dl, %dl
+ jne .LBB79_4
+# BB#5:
+ movl $0, %edx
+ movl $0, %esi
+ jmp .LBB79_6
+.LBB79_4:
+ movl (%ebx), %esi
+ movl 4(%ebx), %edx
+.LBB79_6:
+ jne .LBB79_7
+# BB#8:
+ movl $0, %edi
+ jmp .LBB79_9
+.LBB79_7:
+ movl 12(%ebx), %edi
+.LBB79_9:
+ jne .LBB79_10
+# BB#11:
+ xorl %ebx, %ebx
+ jmp .LBB79_12
+.LBB79_10:
+ movl 8(%ebx), %ebx
+.LBB79_12:
+# z[5..9] = high-half difference + (borrow ? p : 0)
+ addl 4(%esp), %esi # 4-byte Folded Reload
+ adcl %ebp, %edx
+ movl %esi, 20(%ecx)
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ movl %edx, 24(%ecx)
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 28(%ecx)
+ movl %edi, 32(%ecx)
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ addl $16, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end79:
+ .size mcl_fpDbl_sub5L, .Lfunc_end79-mcl_fpDbl_sub5L
+
+ .globl mcl_fp_mulUnitPre6L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre6L,@function
+#-----------------------------------------------------------------------
+# void mcl_fp_mulUnitPre6L(u32 z[7], const u32 x[6], u32 y)
+# ABI:   cdecl (i386).
+# z = x * y: multiplies a 6-limb operand by one 32-bit word.  The six
+# 32x32->64 products are computed first (highest limb down), then the
+# low/high words are combined with one adcl chain into 7 output limbs.
+# After the 4 pushes + subl $28, args sit at 48/52/56(%esp).
+#-----------------------------------------------------------------------
+mcl_fp_mulUnitPre6L: # @mcl_fp_mulUnitPre6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+ movl 56(%esp), %ebx # %ebx = multiplier word y
+ movl 52(%esp), %edi # %edi = source limbs x
+ movl %ebx, %eax
+ mull 20(%edi) # y * x[5]
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 16(%edi) # y * x[4]
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 12(%edi) # y * x[3]
+ movl %edx, %esi
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 8(%edi) # y * x[2]
+ movl %edx, %ebp
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 4(%edi) # y * x[1]
+ movl %edx, %ecx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull (%edi) # y * x[0]
+# combine: z[i+1] = hi(prod_i) + lo(prod_{i+1}) with carry propagation
+ movl 48(%esp), %edi
+ movl %eax, (%edi)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%edi)
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%edi)
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%edi)
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%edi)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%edi)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 24(%edi) # top limb: final carry out
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end80:
+ .size mcl_fp_mulUnitPre6L, .Lfunc_end80-mcl_fp_mulUnitPre6L
+
+ .globl mcl_fpDbl_mulPre6L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre6L,@function
+#-----------------------------------------------------------------------
+# void mcl_fpDbl_mulPre6L(u32 z[12], const u32 x[6], const u32 y[6])
+# ABI:   cdecl (i386).
+# Schoolbook 6x6-limb multiply producing a 12-limb product ("Pre": no
+# modular reduction).  One pass per multiplier limb y[j]: compute the
+# six 32x32->64 partial products x[i]*y[j], add their low words into
+# the running column sums, emit z[j], then fold in the high words for
+# the next column.  Register pressure forces heavy spill traffic.
+# After the 4 pushes + subl $84, args sit at 104/108/112(%esp).
+#-----------------------------------------------------------------------
+mcl_fpDbl_mulPre6L: # @mcl_fpDbl_mulPre6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $84, %esp
+# pass j=0: products x[i]*y[0]; z[0] stored immediately
+ movl 108(%esp), %esi
+ movl (%esi), %ebp
+ movl 112(%esp), %eax
+ movl (%eax), %edi
+ movl %ebp, %eax
+ mull %edi
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 104(%esp), %edx
+ movl %eax, (%edx)
+ movl 4(%esi), %ebx
+ movl 8(%esi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 20(%esi), %ecx
+# pass j=1 products x[i]*y[1] are interleaved with the j=0 ones
+ movl 112(%esp), %eax
+ movl 4(%eax), %esi
+ movl %ebx, %eax
+ mull %esi
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %esi
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %edi
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull %esi
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, %eax
+ mull %esi
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, %ecx
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, %esi
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %edi
+ movl %eax, %ebx
+ movl %edx, %edi
+# column sums for j=0 high words, then z[1]
+ addl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 32(%esp), %ebp # 4-byte Folded Reload
+ movl 104(%esp), %eax
+ movl %ebp, 4(%eax)
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %ecx, %eax
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %edx, %ecx
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ sbbl %edx, %edx
+ andl $1, %edx
+ addl 60(%esp), %ebx # 4-byte Folded Reload
+ adcl 72(%esp), %edi # 4-byte Folded Reload
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+# pass j=2: products x[i]*y[2], then z[2]
+ movl 108(%esp), %ebp
+ movl 20(%ebp), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl 8(%eax), %ecx
+ movl %edx, %eax
+ mull %ecx
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 16(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 12(%ebp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 8(%ebp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl (%ebp), %esi
+ movl 4(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ mull %ecx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %esi, %eax
+ mull %ecx
+ movl %edx, 24(%esp) # 4-byte Spill
+ addl %ebx, %eax
+ movl 104(%esp), %ecx
+ movl %eax, 8(%ecx)
+ adcl %edi, %ebp
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 72(%esp) # 4-byte Folded Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 76(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %ebx # 4-byte Reload
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl %eax, 80(%esp) # 4-byte Folded Spill
+ sbbl %edi, %edi
+# pass j=3: products x[i]*y[3], then z[3]
+ movl 112(%esp), %eax
+ movl 12(%eax), %ecx
+ movl 20(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 8(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ecx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %edx, 32(%esp) # 4-byte Spill
+ andl $1, %edi
+ addl 24(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, %ecx
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ movl 104(%esp), %ebx
+ movl %ebp, 12(%ebx)
+ movl %esi, %ebx
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl %edx, %esi
+ adcl (%esp), %esi # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ sbbl %edx, %edx
+ andl $1, %edx
+ addl 16(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 72(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+# reload all x limbs, pass j=4: products x[i]*y[4], then z[4]
+ movl 108(%esp), %eax
+ movl %eax, %ecx
+ movl 20(%ecx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 16(%ecx), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 12(%ecx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 8(%ecx), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl (%ecx), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 4(%ecx), %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ movl 112(%esp), %esi
+ movl 16(%esi), %ecx
+ mull %ecx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %ecx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull %ecx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull %ecx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 24(%esp) # 4-byte Spill
+ addl 72(%esp), %eax # 4-byte Folded Reload
+ movl 104(%esp), %ecx
+ movl %eax, 16(%ecx)
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 64(%esp) # 4-byte Folded Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 8(%esp) # 4-byte Spill
+# final pass j=5: products x[i]*y[5], then z[5..11]
+ movl 112(%esp), %eax
+ movl 20(%eax), %ecx
+ sbbl %esi, %esi
+ movl 20(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 4(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edx, 56(%esp) # 4-byte Spill
+ andl $1, %esi
+ addl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl 8(%esp), %ecx # 4-byte Reload
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ movl 104(%esp), %edx
+ movl %ebp, 20(%edx)
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %edx
+ movl %ecx, %ebp
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 60(%esp), %ebx # 4-byte Folded Reload
+ adcl 72(%esp), %edi # 4-byte Folded Reload
+ movl 104(%esp), %ecx
+ movl %ebx, 24(%ecx)
+ movl %edx, %ebx
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, 28(%ecx)
+ movl %ebp, %edx
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, 32(%ecx)
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 36(%ecx)
+ movl %esi, 40(%ecx)
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ addl $84, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end81:
+ .size mcl_fpDbl_mulPre6L, .Lfunc_end81-mcl_fpDbl_mulPre6L
+
+ .globl mcl_fpDbl_sqrPre6L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre6L,@function
+#-----------------------------------------------------------------------
+# void mcl_fpDbl_sqrPre6L(u32 z[12], const u32 x[6])
+# ABI:   cdecl (i386).
+# 12-limb square of a 6-limb operand ("Pre": no modular reduction).
+# Structured like the generic 6x6 schoolbook multiply with x as both
+# operands: one column pass per multiplier limb x[j], combining the
+# low/high words of the 32x32->64 partial products through adcl
+# chains.  (Cross-product symmetry is not exploited beyond what the
+# compiler's CSE produced; the passes mirror mcl_fpDbl_mulPre6L.)
+# After the 4 pushes + subl $100, args sit at 120/124(%esp).
+#-----------------------------------------------------------------------
+mcl_fpDbl_sqrPre6L: # @mcl_fpDbl_sqrPre6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+# pass j=0/1: products against x[0] and x[1]; z[0], z[1]
+ movl 124(%esp), %esi
+ movl 20(%esi), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl (%esi), %ebp
+ movl 4(%esi), %ebx
+ mull %ebx
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 16(%esi), %ecx
+ movl %ecx, %eax
+ mull %ebx
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 12(%esi), %edi
+ movl %edi, %eax
+ mull %ebx
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl 8(%eax), %esi
+ movl %esi, %eax
+ mull %ebx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ mull %ebp
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebp
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %ebp
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebp
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %ebx, %eax
+ mull %ebx # x[1]^2
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ebp # x[1]*x[0]
+ movl %edx, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull %ebp # x[0]^2 -> z[0]
+ movl 120(%esp), %ebx
+ movl %eax, (%ebx)
+ addl %edi, %edx
+ adcl %esi, %ecx
+ movl %ecx, %ebx
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ addl %edi, %edx # x[0]*x[1] counted twice (cross term)
+ movl 120(%esp), %edi
+ movl %edx, 4(%edi)
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %esi, %edx
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %edi
+ adcl 72(%esp), %ebp # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %ebx, %esi
+ addl 32(%esp), %esi # 4-byte Folded Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 76(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 96(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %edi
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+# pass j=2: products against x[2]; z[2]
+ movl 124(%esp), %ebx
+ movl 20(%ebx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 8(%ebx), %ebp
+ mull %ebp
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 16(%ebx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ mull %ebp
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 12(%ebx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ mull %ebp
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl (%ebx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 4(%ebx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ mull %ebp
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebp
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull %ebp # x[2]^2
+ movl %eax, %ebp
+ addl %esi, %ebx
+ movl 120(%esp), %eax
+ movl %ebx, 8(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx # 4-byte Folded Reload
+ adcl 92(%esp), %ebp # 4-byte Folded Reload
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl 80(%esp), %ebx # 4-byte Reload
+ adcl %edi, %ebx
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ sbbl %edi, %edi
+ andl $1, %edi
+ addl 32(%esp), %ecx # 4-byte Folded Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 96(%esp) # 4-byte Spill
+ adcl %edx, %eax
+ movl %eax, %ebp
+ adcl 76(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 80(%esp) # 4-byte Spill
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 84(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+# pass j=3: products against x[3]; z[3]
+ movl 72(%esp), %eax # 4-byte Reload
+ movl 36(%esp), %edi # 4-byte Reload
+ mull %edi
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 56(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %edi, %eax
+ mull %edi # x[3]^2
+ movl %eax, %edi
+ movl %edx, 36(%esp) # 4-byte Spill
+ addl %ecx, %esi
+ movl 120(%esp), %eax
+ movl %esi, 12(%eax)
+ adcl 96(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 72(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 96(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 88(%esp) # 4-byte Folded Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 92(%esp) # 4-byte Folded Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+# passes j=4 and j=5: products against x[4] and x[5]; z[4..11]
+ movl 124(%esp), %ecx
+ movl (%ecx), %ebx
+ movl 4(%ecx), %edi
+ movl 20(%ecx), %ebp
+ movl %edi, %eax
+ mull %ebp
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ebp
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 16(%ecx), %esi
+ movl %edi, %eax
+ mull %esi
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %esi
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ addl %eax, 72(%esp) # 4-byte Folded Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 96(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl %eax, 88(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 92(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl 12(%eax), %edi
+ movl 8(%eax), %ebx
+ movl %edi, %eax
+ mull %ebp
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %esi
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ebp
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %esi
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull %esi # x[5]*x[4]
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %ebp # x[5]^2
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %esi # x[4]^2
+ movl %eax, %ebx
+ movl %edx, (%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ addl 12(%esp), %eax # 4-byte Folded Reload
+ movl 120(%esp), %ebp
+ movl %eax, 16(%ebp)
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ adcl %ecx, %edi
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx # 4-byte Folded Reload
+ adcl 92(%esp), %ebx # 4-byte Folded Reload
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ sbbl %esi, %esi
+ andl $1, %esi
+ addl 28(%esp), %eax # 4-byte Folded Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ adcl (%esp), %edx # 4-byte Folded Reload
+ adcl 80(%esp), %esi # 4-byte Folded Reload
+ addl 48(%esp), %eax # 4-byte Folded Reload
+ movl 120(%esp), %ebp
+ movl %eax, 20(%ebp)
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %edx, %eax
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ sbbl %edx, %edx
+ andl $1, %edx
+ addl 64(%esp), %edi # 4-byte Folded Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl 120(%esp), %ebp
+ movl %edi, 24(%ebp)
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ecx, 28(%ebp)
+ movl %eax, %edi
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 32(%ebp)
+ adcl 80(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 36(%ebp)
+ movl %esi, 40(%ebp)
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%ebp)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end82:
+ .size mcl_fpDbl_sqrPre6L, .Lfunc_end82-mcl_fpDbl_sqrPre6L
+
+ .globl mcl_fp_mont6L
+ .align 16, 0x90
+ .type mcl_fp_mont6L,@function
+mcl_fp_mont6L: # @mcl_fp_mont6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $132, %esp
+ movl 156(%esp), %edi
+ movl (%edi), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 160(%esp), %ecx
+ movl (%ecx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 164(%esp), %edx
+ movl -4(%edx), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ imull %ecx, %ebp
+ movl (%edx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 20(%edx), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 16(%edx), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 12(%edx), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 8(%edx), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 4(%edx), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 4(%edi), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl %edi, %eax
+ movl 20(%eax), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 16(%eax), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 12(%eax), %ebx
+ movl %ebx, 80(%esp) # 4-byte Spill
+ movl 8(%eax), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %esi
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edi, %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ mull %ebp
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebp
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ebp
+ movl %edx, %ebx
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ mull %ebp
+ movl %ebp, %ecx
+ movl %edx, %ebp
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %eax, %edi
+ addl 64(%esp), %edi # 4-byte Folded Reload
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 72(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ addl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 20(%esp), %ebx # 4-byte Reload
+ addl 68(%esp), %ebx # 4-byte Folded Reload
+ adcl %edi, 40(%esp) # 4-byte Folded Spill
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 60(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl 4(%eax), %ecx
+ movl %ecx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ addl %esi, %edx
+ movl %edx, %esi
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl %edi, %ebx
+ movl %ebx, %edi
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 64(%esp), %ebx # 4-byte Reload
+ addl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %ebx, %esi
+ imull 112(%esp), %esi # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, %ecx
+ movl %esi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %eax, %edi
+ movl %edx, %ebx
+ addl %ecx, %ebx
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 64(%esp), %edi # 4-byte Folded Reload
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 160(%esp), %eax
+ movl 8(%eax), %ecx
+ movl %ecx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ addl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %edi
+ addl %ebx, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl %ebp, 44(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 40(%esp) # 4-byte Folded Spill
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ movl %eax, %esi
+ imull 112(%esp), %esi # 4-byte Folded Reload
+ andl $1, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, %edi
+ movl %esi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %eax, %esi
+ movl %edx, %ebx
+ addl %edi, %ebx
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %ecx, %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 32(%esp), %esi # 4-byte Folded Reload
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 72(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 160(%esp), %eax
+ movl 12(%eax), %edi
+ movl %edi, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ addl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %esi
+ addl %ebx, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl %ebp, 52(%esp) # 4-byte Folded Spill
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 48(%esp) # 4-byte Folded Spill
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ movl %eax, %esi
+ imull 112(%esp), %esi # 4-byte Folded Reload
+ andl $1, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, %edi
+ movl %esi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %eax, %esi
+ movl %edx, %ebx
+ addl %edi, %ebx
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %ecx, %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 32(%esp), %esi # 4-byte Folded Reload
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 72(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 160(%esp), %eax
+ movl 16(%eax), %edi
+ movl %edi, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ addl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %esi
+ addl %ebx, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl %ebp, 48(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 44(%esp) # 4-byte Folded Spill
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 36(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx
+ movl %eax, %esi
+ imull 112(%esp), %esi # 4-byte Folded Reload
+ andl $1, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, %edi
+ movl %esi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %eax, %ecx
+ movl %edx, %ebx
+ addl %edi, %ebx
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 32(%esp), %ecx # 4-byte Folded Reload
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 72(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 160(%esp), %eax
+ movl 20(%eax), %edi
+ movl %edi, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ addl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 72(%esp), %edi # 4-byte Folded Reload
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %esi, %eax
+ adcl $0, %eax
+ movl 100(%esp), %esi # 4-byte Reload
+ addl %ebx, %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ adcl %ebp, 92(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 80(%esp) # 4-byte Folded Spill
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 88(%esp) # 4-byte Spill
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl 112(%esp), %ecx # 4-byte Reload
+ imull %esi, %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ andl $1, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ mull 104(%esp) # 4-byte Folded Reload
+ addl 56(%esp), %eax # 4-byte Folded Reload
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ adcl $0, %edi
+ addl 100(%esp), %esi # 4-byte Folded Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ adcl 96(%esp), %ecx # 4-byte Folded Reload
+ adcl 88(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 112(%esp) # 4-byte Spill
+ adcl 84(%esp), %ebp # 4-byte Folded Reload
+ adcl 76(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %eax, %esi
+ subl 108(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl %edx, %esi
+ sbbl 104(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ sbbl 116(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl %ebx, %esi
+ movl %edi, %ebx
+ sbbl 120(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ movl %ebp, %edi
+ sbbl 124(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ movl %ebp, %esi
+ sbbl 128(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 128(%esp) # 4-byte Spill
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB83_2
+# BB#1:
+ movl 104(%esp), %edx # 4-byte Reload
+.LBB83_2:
+ testb %bl, %bl
+ jne .LBB83_4
+# BB#3:
+ movl 108(%esp), %eax # 4-byte Reload
+.LBB83_4:
+ movl 152(%esp), %ebx
+ movl %eax, (%ebx)
+ movl %edx, 4(%ebx)
+ jne .LBB83_6
+# BB#5:
+ movl 116(%esp), %ecx # 4-byte Reload
+.LBB83_6:
+ movl %ecx, 8(%ebx)
+ movl 112(%esp), %eax # 4-byte Reload
+ jne .LBB83_8
+# BB#7:
+ movl 120(%esp), %eax # 4-byte Reload
+.LBB83_8:
+ movl %eax, 12(%ebx)
+ jne .LBB83_10
+# BB#9:
+ movl 124(%esp), %edi # 4-byte Reload
+.LBB83_10:
+ movl %edi, 16(%ebx)
+ jne .LBB83_12
+# BB#11:
+ movl 128(%esp), %ebp # 4-byte Reload
+.LBB83_12:
+ movl %ebp, 20(%ebx)
+ addl $132, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end83:
+ .size mcl_fp_mont6L, .Lfunc_end83-mcl_fp_mont6L
+
+ .globl mcl_fp_montNF6L
+ .align 16, 0x90
+ .type mcl_fp_montNF6L,@function
+mcl_fp_montNF6L: # @mcl_fp_montNF6L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $132, %esp
+ movl 156(%esp), %ebx
+ movl (%ebx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 160(%esp), %ecx
+ movl (%ecx), %edi
+ mull %edi
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 164(%esp), %esi
+ movl -4(%esi), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ imull %edx, %ecx
+ movl (%esi), %edx
+ movl %edx, 128(%esp) # 4-byte Spill
+ movl 20(%esi), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 4(%esi), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 4(%ebx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 20(%ebx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 16(%ebx), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 12(%ebx), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 8(%ebx), %ebx
+ movl %ebx, 88(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %edi
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %edi
+ movl %edx, %ecx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %edi
+ movl %edx, %ebx
+ movl %eax, (%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, %ebp
+ movl %eax, %esi
+ addl 64(%esp), %esi # 4-byte Folded Reload
+ adcl (%esp), %ebp # 4-byte Folded Reload
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 12(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 16(%esp), %edi # 4-byte Reload
+ addl 72(%esp), %edi # 4-byte Folded Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ adcl $0, %eax
+ addl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl 4(%eax), %edi
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ addl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %esi, 40(%esp) # 4-byte Folded Spill
+ adcl %ebp, 44(%esp) # 4-byte Folded Spill
+ adcl %ebx, 48(%esp) # 4-byte Folded Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, %ebp
+ imull 96(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ebp, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ addl %ecx, %eax
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %esi, %ebp
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %edx, %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl 8(%eax), %ecx
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ addl %esi, %ebp
+ adcl %edi, %ebx
+ movl %ebx, %edi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 24(%esp), %ebx # 4-byte Reload
+ addl 40(%esp), %ebx # 4-byte Folded Reload
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebx, %edi
+ movl %ebx, %ecx
+ imull 96(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %edi, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %edi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ addl %ecx, %eax
+ adcl %ebp, %esi
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl 12(%eax), %ecx
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ addl %ebx, %ebp
+ adcl %esi, %edi
+ movl %edi, %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 24(%esp), %edi # 4-byte Reload
+ addl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 52(%esp) # 4-byte Spill
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edi, %esi
+ movl %edi, %ecx
+ imull 96(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %esi, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %esi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ addl %ecx, %eax
+ adcl %ebp, %ebx
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 64(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl 16(%eax), %ecx
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ addl %ebp, %ebx
+ adcl %edi, %esi
+ movl %esi, %edi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 24(%esp), %esi # 4-byte Reload
+ addl 40(%esp), %esi # 4-byte Folded Reload
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %esi, %ebx
+ movl %esi, %ecx
+ imull 96(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ebx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ebx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ addl %ecx, %eax
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl %edi, %ebx
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %edx, %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl 20(%eax), %ecx
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 80(%esp) # 4-byte Folded Reload
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 84(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 88(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %eax, %ebp
+ movl %edx, %ebx
+ addl %edi, %ebx
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %edi
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ addl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 80(%esp) # 4-byte Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ adcl 72(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 96(%esp), %ebx # 4-byte Reload
+ imull %ebp, %ebx
+ movl %ebx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ebx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ebx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %ebx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ addl 52(%esp), %edi # 4-byte Folded Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ adcl 88(%esp), %esi # 4-byte Folded Reload
+ adcl 84(%esp), %ebp # 4-byte Folded Reload
+ movl 104(%esp), %ebx # 4-byte Reload
+ adcl 100(%esp), %ebx # 4-byte Folded Reload
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl 72(%esp), %eax # 4-byte Folded Reload
+ adcl %edx, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 100(%esp) # 4-byte Spill
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ adcl 92(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 104(%esp) # 4-byte Spill
+ adcl 96(%esp), %edi # 4-byte Folded Reload
+ movl %eax, %edx
+ subl 128(%esp), %edx # 4-byte Folded Reload
+ sbbl 112(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebx
+ sbbl 116(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl %ebp, %ecx
+ sbbl 120(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ sbbl 124(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl %edi, %ecx
+ movl %edi, %esi
+ movl %ecx, %edi
+ sbbl 108(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %ecx
+ sarl $31, %ecx
+ testl %ecx, %ecx
+ js .LBB84_2
+# BB#1:
+ movl %edx, %eax
+.LBB84_2:
+ movl 152(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 88(%esp), %eax # 4-byte Reload
+ js .LBB84_4
+# BB#3:
+ movl %ebx, %eax
+.LBB84_4:
+ movl %eax, 4(%ecx)
+ movl %ecx, %ebx
+ movl %esi, %eax
+ movl 104(%esp), %ecx # 4-byte Reload
+ movl 100(%esp), %edx # 4-byte Reload
+ js .LBB84_6
+# BB#5:
+ movl 116(%esp), %edx # 4-byte Reload
+.LBB84_6:
+ movl %edx, 8(%ebx)
+ movl %ebx, %edx
+ js .LBB84_8
+# BB#7:
+ movl 120(%esp), %ebp # 4-byte Reload
+.LBB84_8:
+ movl %ebp, 12(%edx)
+ js .LBB84_10
+# BB#9:
+ movl 128(%esp), %ecx # 4-byte Reload
+.LBB84_10:
+ movl %ecx, 16(%edx)
+ js .LBB84_12
+# BB#11:
+ movl %edi, %eax
+.LBB84_12:
+ movl %eax, 20(%edx)
+ addl $132, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end84:
+ .size mcl_fp_montNF6L, .Lfunc_end84-mcl_fp_montNF6L
+
+ #-----------------------------------------------------------------------
+ # mcl_fp_montRed6L -- Montgomery reduction for a 6-limb (192-bit) modulus.
+ # i386 cdecl. After 4 pushes + subl $104 (120 bytes of frame), the stack
+ # arguments sit at:
+ #   124(%esp) = z   (output, 6 x 32-bit limbs)
+ #   128(%esp) = xy  (input, 12 limbs -- the double-width value to reduce)
+ #   132(%esp) = p   (modulus; the word at p[-1] is multiplied into each
+ #                    round's quotient, presumably the Montgomery n' --
+ #                    TODO confirm against the mcl C prototype)
+ # Six rounds: each computes q = t[0]*n' (mod 2^32), accumulates q*p into
+ # the running value t so its low limb cancels, and shifts t down one limb
+ # (absorbing the next input limb). A final conditional subtract of p
+ # brings the result into [0, p). Callee-saved ebp/ebx/edi/esi are
+ # preserved by the push/pop pairs.
+ #-----------------------------------------------------------------------
+	.globl	mcl_fp_montRed6L
+	.align	16, 0x90
+	.type	mcl_fp_montRed6L,@function
+mcl_fp_montRed6L:                       # @mcl_fp_montRed6L
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$104, %esp
+	movl	132(%esp), %eax
+	movl	-4(%eax), %edx          # edx = p[-1] (Montgomery constant)
+	movl	%edx, 96(%esp)          # 4-byte Spill
+	movl	(%eax), %ecx
+	movl	%ecx, 92(%esp)          # 4-byte Spill
+	movl	128(%esp), %ebp
+	movl	(%ebp), %ecx
+	movl	%ecx, 72(%esp)          # 4-byte Spill
+ # round 1: q = xy[0] * n'; multiply q by all six modulus limbs
+	imull	%edx, %ecx
+	movl	20(%eax), %edx
+	movl	%edx, 76(%esp)          # 4-byte Spill
+	movl	16(%eax), %esi
+	movl	%esi, 100(%esp)         # 4-byte Spill
+	movl	12(%eax), %ebx
+	movl	%ebx, 88(%esp)          # 4-byte Spill
+	movl	8(%eax), %esi
+	movl	%esi, 84(%esp)          # 4-byte Spill
+	movl	4(%eax), %edi
+	movl	%edi, 80(%esp)          # 4-byte Spill
+	movl	%ecx, %eax
+	mull	%edx
+	movl	%edx, 56(%esp)          # 4-byte Spill
+	movl	%eax, 68(%esp)          # 4-byte Spill
+	movl	%ecx, %eax
+	mull	100(%esp)               # 4-byte Folded Reload
+	movl	%edx, 52(%esp)          # 4-byte Spill
+	movl	%eax, 64(%esp)          # 4-byte Spill
+	movl	%ecx, %eax
+	mull	%ebx
+	movl	%edx, 48(%esp)          # 4-byte Spill
+	movl	%eax, 60(%esp)          # 4-byte Spill
+	movl	%ecx, %eax
+	mull	%esi
+	movl	%edx, 44(%esp)          # 4-byte Spill
+	movl	%eax, 40(%esp)          # 4-byte Spill
+	movl	%ecx, %eax
+	mull	%edi
+	movl	%edx, %esi
+	movl	%eax, %edi
+	movl	%ecx, %eax
+	mull	92(%esp)                # 4-byte Folded Reload
+	movl	%eax, %ebx
+ # fold the q*p partial products into one 7-limb value (carry chain)
+	addl	%edi, %edx
+	movl	%edx, 36(%esp)          # 4-byte Spill
+	adcl	40(%esp), %esi          # 4-byte Folded Reload
+	movl	%esi, %eax
+	movl	44(%esp), %ecx          # 4-byte Reload
+	adcl	60(%esp), %ecx          # 4-byte Folded Reload
+	movl	48(%esp), %edi          # 4-byte Reload
+	adcl	64(%esp), %edi          # 4-byte Folded Reload
+	movl	52(%esp), %esi          # 4-byte Reload
+	adcl	68(%esp), %esi          # 4-byte Folded Reload
+	movl	56(%esp), %edx          # 4-byte Reload
+	adcl	$0, %edx
+ # add the input xy[0..6]; low limb cancels (only its carry survives)
+	addl	72(%esp), %ebx          # 4-byte Folded Reload
+	movl	36(%esp), %ebx          # 4-byte Reload
+	adcl	4(%ebp), %ebx
+	adcl	8(%ebp), %eax
+	movl	%eax, 16(%esp)          # 4-byte Spill
+	adcl	12(%ebp), %ecx
+	movl	%ecx, 44(%esp)          # 4-byte Spill
+	adcl	16(%ebp), %edi
+	movl	%edi, 48(%esp)          # 4-byte Spill
+	adcl	20(%ebp), %esi
+	movl	%esi, 52(%esp)          # 4-byte Spill
+	adcl	24(%ebp), %edx
+	movl	%edx, 56(%esp)          # 4-byte Spill
+ # propagate the carry through the remaining high input limbs xy[7..11]
+	movl	44(%ebp), %eax
+	movl	40(%ebp), %edx
+	movl	36(%ebp), %esi
+	movl	32(%ebp), %edi
+	movl	28(%ebp), %ecx
+	adcl	$0, %ecx
+	movl	%ecx, 12(%esp)          # 4-byte Spill
+	adcl	$0, %edi
+	movl	%edi, 20(%esp)          # 4-byte Spill
+	adcl	$0, %esi
+	movl	%esi, 60(%esp)          # 4-byte Spill
+	adcl	$0, %edx
+	movl	%edx, 64(%esp)          # 4-byte Spill
+	adcl	$0, %eax
+	movl	%eax, 72(%esp)          # 4-byte Spill
+	sbbl	%eax, %eax
+	andl	$1, %eax                # eax = overall carry-out flag
+	movl	%eax, 68(%esp)          # 4-byte Spill
+ # round 2
+	movl	%ebx, %esi
+	imull	96(%esp), %esi          # 4-byte Folded Reload
+	movl	%esi, %eax
+	mull	76(%esp)                # 4-byte Folded Reload
+	movl	%edx, 40(%esp)          # 4-byte Spill
+	movl	%eax, 8(%esp)           # 4-byte Spill
+	movl	%esi, %eax
+	mull	100(%esp)               # 4-byte Folded Reload
+	movl	%edx, 36(%esp)          # 4-byte Spill
+	movl	%eax, 4(%esp)           # 4-byte Spill
+	movl	%esi, %eax
+	mull	88(%esp)                # 4-byte Folded Reload
+	movl	%edx, 32(%esp)          # 4-byte Spill
+	movl	%eax, (%esp)            # 4-byte Spill
+	movl	%esi, %eax
+	mull	84(%esp)                # 4-byte Folded Reload
+	movl	%edx, 28(%esp)          # 4-byte Spill
+	movl	%eax, %ebp
+	movl	%esi, %eax
+	mull	80(%esp)                # 4-byte Folded Reload
+	movl	%edx, %ecx
+	movl	%eax, %edi
+	movl	%esi, %eax
+	mull	92(%esp)                # 4-byte Folded Reload
+	movl	%eax, %esi
+	addl	%edi, %edx
+	movl	%edx, 24(%esp)          # 4-byte Spill
+	adcl	%ebp, %ecx
+	movl	%ecx, %ebp
+	movl	28(%esp), %eax          # 4-byte Reload
+	adcl	(%esp), %eax            # 4-byte Folded Reload
+	movl	32(%esp), %edi          # 4-byte Reload
+	adcl	4(%esp), %edi           # 4-byte Folded Reload
+	movl	36(%esp), %edx          # 4-byte Reload
+	adcl	8(%esp), %edx           # 4-byte Folded Reload
+	movl	40(%esp), %ecx          # 4-byte Reload
+	adcl	$0, %ecx
+	addl	%ebx, %esi
+	movl	24(%esp), %esi          # 4-byte Reload
+	adcl	16(%esp), %esi          # 4-byte Folded Reload
+	movl	%esi, 24(%esp)          # 4-byte Spill
+	adcl	44(%esp), %ebp          # 4-byte Folded Reload
+	movl	%ebp, 44(%esp)          # 4-byte Spill
+	adcl	48(%esp), %eax          # 4-byte Folded Reload
+	movl	%eax, 28(%esp)          # 4-byte Spill
+	adcl	52(%esp), %edi          # 4-byte Folded Reload
+	movl	%edi, 32(%esp)          # 4-byte Spill
+	adcl	56(%esp), %edx          # 4-byte Folded Reload
+	movl	%edx, 36(%esp)          # 4-byte Spill
+	adcl	12(%esp), %ecx          # 4-byte Folded Reload
+	movl	%ecx, 40(%esp)          # 4-byte Spill
+	adcl	$0, 20(%esp)            # 4-byte Folded Spill
+	adcl	$0, 60(%esp)            # 4-byte Folded Spill
+	adcl	$0, 64(%esp)            # 4-byte Folded Spill
+	adcl	$0, 72(%esp)            # 4-byte Folded Spill
+	adcl	$0, 68(%esp)            # 4-byte Folded Spill
+ # round 3
+	movl	%esi, %ebx
+	imull	96(%esp), %ebx          # 4-byte Folded Reload
+	movl	%ebx, %eax
+	mull	76(%esp)                # 4-byte Folded Reload
+	movl	%edx, 56(%esp)          # 4-byte Spill
+	movl	%eax, 16(%esp)          # 4-byte Spill
+	movl	%ebx, %eax
+	mull	100(%esp)               # 4-byte Folded Reload
+	movl	%edx, 52(%esp)          # 4-byte Spill
+	movl	%eax, 12(%esp)          # 4-byte Spill
+	movl	%ebx, %eax
+	mull	88(%esp)                # 4-byte Folded Reload
+	movl	%edx, 48(%esp)          # 4-byte Spill
+	movl	%eax, 8(%esp)           # 4-byte Spill
+	movl	%ebx, %eax
+	mull	84(%esp)                # 4-byte Folded Reload
+	movl	%edx, %esi
+	movl	%eax, 4(%esp)           # 4-byte Spill
+	movl	%ebx, %eax
+	mull	80(%esp)                # 4-byte Folded Reload
+	movl	%edx, %edi
+	movl	%eax, %ecx
+	movl	%ebx, %eax
+	mull	92(%esp)                # 4-byte Folded Reload
+	movl	%edx, %ebp
+	addl	%ecx, %ebp
+	adcl	4(%esp), %edi           # 4-byte Folded Reload
+	adcl	8(%esp), %esi           # 4-byte Folded Reload
+	movl	48(%esp), %ebx          # 4-byte Reload
+	adcl	12(%esp), %ebx          # 4-byte Folded Reload
+	movl	52(%esp), %edx          # 4-byte Reload
+	adcl	16(%esp), %edx          # 4-byte Folded Reload
+	movl	56(%esp), %ecx          # 4-byte Reload
+	adcl	$0, %ecx
+	addl	24(%esp), %eax          # 4-byte Folded Reload
+	adcl	44(%esp), %ebp          # 4-byte Folded Reload
+	adcl	28(%esp), %edi          # 4-byte Folded Reload
+	movl	%edi, 16(%esp)          # 4-byte Spill
+	adcl	32(%esp), %esi          # 4-byte Folded Reload
+	movl	%esi, 24(%esp)          # 4-byte Spill
+	adcl	36(%esp), %ebx          # 4-byte Folded Reload
+	movl	%ebx, 48(%esp)          # 4-byte Spill
+	adcl	40(%esp), %edx          # 4-byte Folded Reload
+	movl	%edx, 52(%esp)          # 4-byte Spill
+	adcl	20(%esp), %ecx          # 4-byte Folded Reload
+	movl	%ecx, 56(%esp)          # 4-byte Spill
+	adcl	$0, 60(%esp)            # 4-byte Folded Spill
+	adcl	$0, 64(%esp)            # 4-byte Folded Spill
+	adcl	$0, 72(%esp)            # 4-byte Folded Spill
+	adcl	$0, 68(%esp)            # 4-byte Folded Spill
+ # round 4
+	movl	%ebp, %ecx
+	imull	96(%esp), %ecx          # 4-byte Folded Reload
+	movl	%ecx, %eax
+	mull	76(%esp)                # 4-byte Folded Reload
+	movl	%edx, 44(%esp)          # 4-byte Spill
+	movl	%eax, 20(%esp)          # 4-byte Spill
+	movl	%ecx, %eax
+	mull	100(%esp)               # 4-byte Folded Reload
+	movl	%edx, 40(%esp)          # 4-byte Spill
+	movl	%eax, 12(%esp)          # 4-byte Spill
+	movl	%ecx, %eax
+	mull	88(%esp)                # 4-byte Folded Reload
+	movl	%edx, 36(%esp)          # 4-byte Spill
+	movl	%eax, 8(%esp)           # 4-byte Spill
+	movl	%ecx, %eax
+	mull	84(%esp)                # 4-byte Folded Reload
+	movl	%edx, 32(%esp)          # 4-byte Spill
+	movl	%eax, %ebx
+	movl	%ecx, %eax
+	mull	80(%esp)                # 4-byte Folded Reload
+	movl	%edx, %esi
+	movl	%eax, 28(%esp)          # 4-byte Spill
+	movl	%ecx, %eax
+	mull	92(%esp)                # 4-byte Folded Reload
+	movl	%eax, %edi
+	addl	28(%esp), %edx          # 4-byte Folded Reload
+	movl	%edx, 28(%esp)          # 4-byte Spill
+	adcl	%ebx, %esi
+	movl	%esi, %ebx
+	movl	32(%esp), %ecx          # 4-byte Reload
+	adcl	8(%esp), %ecx           # 4-byte Folded Reload
+	movl	36(%esp), %eax          # 4-byte Reload
+	adcl	12(%esp), %eax          # 4-byte Folded Reload
+	movl	40(%esp), %esi          # 4-byte Reload
+	adcl	20(%esp), %esi          # 4-byte Folded Reload
+	movl	44(%esp), %edx          # 4-byte Reload
+	adcl	$0, %edx
+	addl	%ebp, %edi
+	movl	28(%esp), %edi          # 4-byte Reload
+	adcl	16(%esp), %edi          # 4-byte Folded Reload
+	movl	%edi, 28(%esp)          # 4-byte Spill
+	adcl	24(%esp), %ebx          # 4-byte Folded Reload
+	movl	%ebx, 24(%esp)          # 4-byte Spill
+	adcl	48(%esp), %ecx          # 4-byte Folded Reload
+	movl	%ecx, 32(%esp)          # 4-byte Spill
+	adcl	52(%esp), %eax          # 4-byte Folded Reload
+	movl	%eax, 36(%esp)          # 4-byte Spill
+	adcl	56(%esp), %esi          # 4-byte Folded Reload
+	movl	%esi, 40(%esp)          # 4-byte Spill
+	adcl	60(%esp), %edx          # 4-byte Folded Reload
+	movl	%edx, 44(%esp)          # 4-byte Spill
+	adcl	$0, 64(%esp)            # 4-byte Folded Spill
+	adcl	$0, 72(%esp)            # 4-byte Folded Spill
+	adcl	$0, 68(%esp)            # 4-byte Folded Spill
+ # round 5
+	movl	%edi, %esi
+	imull	96(%esp), %esi          # 4-byte Folded Reload
+	movl	%esi, %eax
+	mull	76(%esp)                # 4-byte Folded Reload
+	movl	%edx, 60(%esp)          # 4-byte Spill
+	movl	%eax, 20(%esp)          # 4-byte Spill
+	movl	%esi, %eax
+	mull	100(%esp)               # 4-byte Folded Reload
+	movl	%edx, 56(%esp)          # 4-byte Spill
+	movl	%eax, 16(%esp)          # 4-byte Spill
+	movl	%esi, %eax
+	mull	88(%esp)                # 4-byte Folded Reload
+	movl	%edx, 52(%esp)          # 4-byte Spill
+	movl	%eax, 12(%esp)          # 4-byte Spill
+	movl	%esi, %eax
+	mull	84(%esp)                # 4-byte Folded Reload
+	movl	%edx, %edi
+	movl	%eax, 8(%esp)           # 4-byte Spill
+	movl	%esi, %eax
+	mull	80(%esp)                # 4-byte Folded Reload
+	movl	%edx, 48(%esp)          # 4-byte Spill
+	movl	%eax, %ecx
+	movl	%esi, %eax
+	mull	92(%esp)                # 4-byte Folded Reload
+	movl	%eax, %ebx
+	addl	%ecx, %edx
+	movl	%edx, %ebp
+	movl	48(%esp), %eax          # 4-byte Reload
+	adcl	8(%esp), %eax           # 4-byte Folded Reload
+	adcl	12(%esp), %edi          # 4-byte Folded Reload
+	movl	52(%esp), %ecx          # 4-byte Reload
+	adcl	16(%esp), %ecx          # 4-byte Folded Reload
+	movl	56(%esp), %esi          # 4-byte Reload
+	adcl	20(%esp), %esi          # 4-byte Folded Reload
+	movl	60(%esp), %edx          # 4-byte Reload
+	adcl	$0, %edx
+	addl	28(%esp), %ebx          # 4-byte Folded Reload
+	adcl	24(%esp), %ebp          # 4-byte Folded Reload
+	adcl	32(%esp), %eax          # 4-byte Folded Reload
+	movl	%eax, 48(%esp)          # 4-byte Spill
+	adcl	36(%esp), %edi          # 4-byte Folded Reload
+	movl	%edi, 36(%esp)          # 4-byte Spill
+	adcl	40(%esp), %ecx          # 4-byte Folded Reload
+	movl	%ecx, 52(%esp)          # 4-byte Spill
+	adcl	44(%esp), %esi          # 4-byte Folded Reload
+	movl	%esi, 56(%esp)          # 4-byte Spill
+	adcl	64(%esp), %edx          # 4-byte Folded Reload
+	movl	%edx, 60(%esp)          # 4-byte Spill
+	adcl	$0, 72(%esp)            # 4-byte Folded Spill
+	adcl	$0, 68(%esp)            # 4-byte Folded Spill
+ # round 6 (final)
+	movl	96(%esp), %ebx          # 4-byte Reload
+	imull	%ebp, %ebx
+	movl	%ebx, 96(%esp)          # 4-byte Spill
+	movl	%ebp, %esi
+	movl	%ebx, %eax
+	mull	76(%esp)                # 4-byte Folded Reload
+	movl	%edx, 64(%esp)          # 4-byte Spill
+	movl	%eax, 44(%esp)          # 4-byte Spill
+	movl	%ebx, %eax
+	mull	100(%esp)               # 4-byte Folded Reload
+	movl	%edx, %ebp
+	movl	%eax, 40(%esp)          # 4-byte Spill
+	movl	%ebx, %eax
+	mull	88(%esp)                # 4-byte Folded Reload
+	movl	%edx, %edi
+	movl	%eax, 32(%esp)          # 4-byte Spill
+	movl	%ebx, %eax
+	mull	84(%esp)                # 4-byte Folded Reload
+	movl	%edx, %ecx
+	movl	%eax, 24(%esp)          # 4-byte Spill
+	movl	%ebx, %eax
+	mull	92(%esp)                # 4-byte Folded Reload
+	movl	%edx, %ebx
+	movl	%eax, 28(%esp)          # 4-byte Spill
+	movl	96(%esp), %eax          # 4-byte Reload
+	mull	80(%esp)                # 4-byte Folded Reload
+	addl	%ebx, %eax
+	adcl	24(%esp), %edx          # 4-byte Folded Reload
+	adcl	32(%esp), %ecx          # 4-byte Folded Reload
+	adcl	40(%esp), %edi          # 4-byte Folded Reload
+	adcl	44(%esp), %ebp          # 4-byte Folded Reload
+	movl	64(%esp), %ebx          # 4-byte Reload
+	adcl	$0, %ebx
+	addl	%esi, 28(%esp)          # 4-byte Folded Spill
+	adcl	48(%esp), %eax          # 4-byte Folded Reload
+	adcl	36(%esp), %edx          # 4-byte Folded Reload
+	adcl	52(%esp), %ecx          # 4-byte Folded Reload
+	movl	%ecx, 52(%esp)          # 4-byte Spill
+	adcl	56(%esp), %edi          # 4-byte Folded Reload
+	movl	%edi, 96(%esp)          # 4-byte Spill
+	adcl	60(%esp), %ebp          # 4-byte Folded Reload
+	movl	%ebp, 60(%esp)          # 4-byte Spill
+	adcl	72(%esp), %ebx          # 4-byte Folded Reload
+	movl	%ebx, 64(%esp)          # 4-byte Spill
+	movl	68(%esp), %ebx          # 4-byte Reload
+	adcl	$0, %ebx
+ # final conditional subtract: t - p; keep t when it borrows
+	movl	%eax, %esi
+	subl	92(%esp), %esi          # 4-byte Folded Reload
+	movl	%esi, 72(%esp)          # 4-byte Spill
+	movl	%edx, %esi
+	sbbl	80(%esp), %esi          # 4-byte Folded Reload
+	movl	%esi, 80(%esp)          # 4-byte Spill
+	sbbl	84(%esp), %ecx          # 4-byte Folded Reload
+	movl	%ecx, 84(%esp)          # 4-byte Spill
+	sbbl	88(%esp), %edi          # 4-byte Folded Reload
+	movl	%edi, 88(%esp)          # 4-byte Spill
+	sbbl	100(%esp), %ebp         # 4-byte Folded Reload
+	movl	%ebp, 92(%esp)          # 4-byte Spill
+	movl	64(%esp), %edi          # 4-byte Reload
+	movl	%edi, %esi
+	sbbl	76(%esp), %esi          # 4-byte Folded Reload
+	movl	%esi, 100(%esp)         # 4-byte Spill
+	sbbl	$0, %ebx
+	andl	$1, %ebx                # ebx = 1 -> subtraction borrowed, keep t
+ # select per-limb between t and t-p, then store z[0..5]
+	jne	.LBB85_2
+# BB#1:
+	movl	80(%esp), %edx          # 4-byte Reload
+.LBB85_2:
+	testb	%bl, %bl
+	jne	.LBB85_4
+# BB#3:
+	movl	72(%esp), %eax          # 4-byte Reload
+.LBB85_4:
+	movl	124(%esp), %ebx         # ebx = z (result pointer)
+	movl	%eax, (%ebx)
+	movl	%edx, 4(%ebx)
+	movl	52(%esp), %ecx          # 4-byte Reload
+	jne	.LBB85_6
+# BB#5:
+	movl	84(%esp), %ecx          # 4-byte Reload
+.LBB85_6:
+	movl	%ecx, 8(%ebx)
+	movl	%edi, %ecx
+	movl	60(%esp), %edi          # 4-byte Reload
+	movl	96(%esp), %esi          # 4-byte Reload
+	jne	.LBB85_8
+# BB#7:
+	movl	88(%esp), %esi          # 4-byte Reload
+.LBB85_8:
+	movl	%esi, 12(%ebx)
+	jne	.LBB85_10
+# BB#9:
+	movl	92(%esp), %edi          # 4-byte Reload
+.LBB85_10:
+	movl	%edi, 16(%ebx)
+	jne	.LBB85_12
+# BB#11:
+	movl	100(%esp), %ecx         # 4-byte Reload
+.LBB85_12:
+	movl	%ecx, 20(%ebx)
+	addl	$104, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end85:
+	.size	mcl_fp_montRed6L, .Lfunc_end85-mcl_fp_montRed6L
+
+ #-----------------------------------------------------------------------
+ # mcl_fp_addPre6L -- 6-limb add without modular reduction.
+ # i386 cdecl; after the single push, args are at 8/12/16(%esp):
+ #   8(%esp)  = z (output, 6 limbs)
+ #   12(%esp) = x
+ #   16(%esp) = y
+ # Computes z = x + y limb-by-limb (addl then adcl chain) and returns the
+ # final carry-out (0 or 1) in %eax via the sbbl/andl idiom.
+ #-----------------------------------------------------------------------
+	.globl	mcl_fp_addPre6L
+	.align	16, 0x90
+	.type	mcl_fp_addPre6L,@function
+mcl_fp_addPre6L:                        # @mcl_fp_addPre6L
+# BB#0:
+	pushl	%esi
+	movl	16(%esp), %eax
+	movl	(%eax), %ecx
+	movl	12(%esp), %edx
+	addl	(%edx), %ecx            # limb 0: starts the carry chain
+	movl	8(%esp), %esi
+	movl	%ecx, (%esi)
+	movl	4(%eax), %ecx
+	adcl	4(%edx), %ecx
+	movl	%ecx, 4(%esi)
+	movl	8(%eax), %ecx
+	adcl	8(%edx), %ecx
+	movl	%ecx, 8(%esi)
+	movl	12(%edx), %ecx
+	adcl	12(%eax), %ecx
+	movl	%ecx, 12(%esi)
+	movl	16(%edx), %ecx
+	adcl	16(%eax), %ecx
+	movl	%ecx, 16(%esi)
+	movl	20(%eax), %eax
+	movl	20(%edx), %ecx
+	adcl	%eax, %ecx
+	movl	%ecx, 20(%esi)
+	sbbl	%eax, %eax              # eax = -CF
+	andl	$1, %eax                # return carry-out (0 or 1)
+	popl	%esi
+	retl
+.Lfunc_end86:
+	.size	mcl_fp_addPre6L, .Lfunc_end86-mcl_fp_addPre6L
+
+ #-----------------------------------------------------------------------
+ # mcl_fp_subPre6L -- 6-limb subtract without modular reduction.
+ # i386 cdecl; after two pushes, args are at 12/16/20(%esp):
+ #   12(%esp) = z (output, 6 limbs)
+ #   16(%esp) = x
+ #   20(%esp) = y
+ # Computes z = x - y limb-by-limb (subl then sbbl chain) and returns the
+ # final borrow-out (0 or 1) in %eax.
+ #-----------------------------------------------------------------------
+	.globl	mcl_fp_subPre6L
+	.align	16, 0x90
+	.type	mcl_fp_subPre6L,@function
+mcl_fp_subPre6L:                        # @mcl_fp_subPre6L
+# BB#0:
+	pushl	%edi
+	pushl	%esi
+	movl	16(%esp), %ecx
+	movl	(%ecx), %edx
+	xorl	%eax, %eax              # eax accumulates the borrow at the end
+	movl	20(%esp), %esi
+	subl	(%esi), %edx            # limb 0: starts the borrow chain
+	movl	12(%esp), %edi
+	movl	%edx, (%edi)
+	movl	4(%ecx), %edx
+	sbbl	4(%esi), %edx
+	movl	%edx, 4(%edi)
+	movl	8(%ecx), %edx
+	sbbl	8(%esi), %edx
+	movl	%edx, 8(%edi)
+	movl	12(%ecx), %edx
+	sbbl	12(%esi), %edx
+	movl	%edx, 12(%edi)
+	movl	16(%ecx), %edx
+	sbbl	16(%esi), %edx
+	movl	%edx, 16(%edi)
+	movl	20(%esi), %edx
+	movl	20(%ecx), %ecx
+	sbbl	%edx, %ecx
+	movl	%ecx, 20(%edi)
+	sbbl	$0, %eax
+	andl	$1, %eax                # return borrow-out (0 or 1)
+	popl	%esi
+	popl	%edi
+	retl
+.Lfunc_end87:
+	.size	mcl_fp_subPre6L, .Lfunc_end87-mcl_fp_subPre6L
+
+ #-----------------------------------------------------------------------
+ # mcl_fp_shr1_6L -- logical right shift of a 6-limb value by one bit.
+ # i386 cdecl; after four pushes, args are at 20/24(%esp):
+ #   20(%esp) = z (output, 6 limbs)
+ #   24(%esp) = x (input, 6 limbs)
+ # Each limb is shrdl'd with the next-higher limb so its vacated top bit
+ # is filled from above; the top limb gets a plain shrl (zero fill).
+ #-----------------------------------------------------------------------
+	.globl	mcl_fp_shr1_6L
+	.align	16, 0x90
+	.type	mcl_fp_shr1_6L,@function
+mcl_fp_shr1_6L:                         # @mcl_fp_shr1_6L
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	movl	24(%esp), %eax
+	movl	20(%eax), %ecx          # load all six limbs up front
+	movl	16(%eax), %edx
+	movl	12(%eax), %esi
+	movl	8(%eax), %edi
+	movl	(%eax), %ebx
+	movl	4(%eax), %eax
+	shrdl	$1, %eax, %ebx          # z[0] = (x[1]:x[0]) >> 1
+	movl	20(%esp), %ebp
+	movl	%ebx, (%ebp)
+	shrdl	$1, %edi, %eax
+	movl	%eax, 4(%ebp)
+	shrdl	$1, %esi, %edi
+	movl	%edi, 8(%ebp)
+	shrdl	$1, %edx, %esi
+	movl	%esi, 12(%ebp)
+	shrdl	$1, %ecx, %edx
+	movl	%edx, 16(%ebp)
+	shrl	%ecx                    # top limb: shift in a zero
+	movl	%ecx, 20(%ebp)
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end88:
+	.size	mcl_fp_shr1_6L, .Lfunc_end88-mcl_fp_shr1_6L
+
+ #-----------------------------------------------------------------------
+ # mcl_fp_add6L -- 6-limb modular addition.
+ # i386 cdecl; after 4 pushes + subl $12 (28 bytes of frame), args at:
+ #   32(%esp) = z, 36(%esp) = x, 40(%esp) = y, 44(%esp) = p (modulus)
+ # Strategy: store the raw sum x+y to z first, then trial-subtract p; if
+ # the combined carry/borrow shows the sum was >= p, overwrite z with the
+ # reduced value (the "nocarry" path). Otherwise the raw sum stands.
+ #-----------------------------------------------------------------------
+	.globl	mcl_fp_add6L
+	.align	16, 0x90
+	.type	mcl_fp_add6L,@function
+mcl_fp_add6L:                           # @mcl_fp_add6L
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$12, %esp
+	movl	40(%esp), %eax
+	movl	(%eax), %edx
+	movl	4(%eax), %ebp
+	movl	36(%esp), %ebx
+	addl	(%ebx), %edx            # sum carry chain starts here
+	adcl	4(%ebx), %ebp
+	movl	8(%eax), %ecx
+	adcl	8(%ebx), %ecx
+	movl	%ecx, 8(%esp)           # 4-byte Spill
+	movl	%ecx, %esi
+	movl	12(%ebx), %ecx
+	movl	16(%ebx), %edi
+	adcl	12(%eax), %ecx
+	adcl	16(%eax), %edi
+	movl	20(%ebx), %ebx
+	adcl	20(%eax), %ebx
+	movl	32(%esp), %eax
+	movl	%edx, (%eax)            # store the raw (possibly >= p) sum
+	movl	%ebp, 4(%eax)
+	movl	%esi, 8(%eax)
+	movl	%ecx, 12(%eax)
+	movl	%edi, 16(%eax)
+	movl	%ebx, 20(%eax)
+	sbbl	%eax, %eax
+	andl	$1, %eax                # eax = carry-out of the addition
+	movl	44(%esp), %esi
+	subl	(%esi), %edx            # trial subtract of the modulus p
+	movl	%edx, (%esp)            # 4-byte Spill
+	movl	8(%esp), %edx           # 4-byte Reload
+	movl	44(%esp), %esi
+	sbbl	4(%esi), %ebp
+	movl	%ebp, 4(%esp)           # 4-byte Spill
+	movl	%ecx, %ebp
+	sbbl	8(%esi), %edx
+	sbbl	12(%esi), %ebp
+	sbbl	16(%esi), %edi
+	sbbl	20(%esi), %ebx
+	sbbl	$0, %eax                # fold the add's carry into the borrow
+	testb	$1, %al
+	jne	.LBB89_2                # borrowed -> raw sum was < p, keep it
+# BB#1:                                 # %nocarry
+	movl	(%esp), %eax            # 4-byte Reload
+	movl	32(%esp), %ecx
+	movl	%eax, (%ecx)            # overwrite z with the reduced sum
+	movl	4(%esp), %eax           # 4-byte Reload
+	movl	%eax, 4(%ecx)
+	movl	%edx, 8(%ecx)
+	movl	%ebp, 12(%ecx)
+	movl	%edi, 16(%ecx)
+	movl	%ebx, 20(%ecx)
+.LBB89_2:                               # %carry
+	addl	$12, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end89:
+	.size	mcl_fp_add6L, .Lfunc_end89-mcl_fp_add6L
+
+ #-----------------------------------------------------------------------
+ # mcl_fp_addNF6L -- 6-limb modular addition, branch-on-sign variant.
+ # i386 cdecl; after 4 pushes + subl $40 (56 bytes of frame), args at:
+ #   60(%esp) = z, 64(%esp) = x, 68(%esp) = y, 72(%esp) = p (modulus)
+ # Computes s = x + y, then d = s - p; the sign of d's top limb (sarl $31)
+ # selects, limb by limb, whether to store s (d negative) or d.
+ #-----------------------------------------------------------------------
+	.globl	mcl_fp_addNF6L
+	.align	16, 0x90
+	.type	mcl_fp_addNF6L,@function
+mcl_fp_addNF6L:                         # @mcl_fp_addNF6L
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$40, %esp
+	movl	68(%esp), %eax
+	movl	(%eax), %edx
+	movl	4(%eax), %ecx
+	movl	64(%esp), %ebp
+	addl	(%ebp), %edx            # s = x + y, spilled limb by limb
+	movl	%edx, 16(%esp)          # 4-byte Spill
+	movl	%edx, %ebx
+	adcl	4(%ebp), %ecx
+	movl	%ecx, 20(%esp)          # 4-byte Spill
+	movl	20(%eax), %edx
+	movl	16(%eax), %esi
+	movl	12(%eax), %edi
+	movl	8(%eax), %eax
+	adcl	8(%ebp), %eax
+	movl	%eax, 24(%esp)          # 4-byte Spill
+	adcl	12(%ebp), %edi
+	movl	%edi, 28(%esp)          # 4-byte Spill
+	adcl	16(%ebp), %esi
+	movl	%esi, 32(%esp)          # 4-byte Spill
+	adcl	20(%ebp), %edx
+	movl	%edx, 36(%esp)          # 4-byte Spill
+	movl	%ebx, %ebp
+	movl	72(%esp), %ebx
+	subl	(%ebx), %ebp            # d = s - p
+	movl	%ebp, (%esp)            # 4-byte Spill
+	movl	%ecx, %ebp
+	movl	72(%esp), %ecx
+	sbbl	4(%ecx), %ebp
+	movl	%ebp, 4(%esp)           # 4-byte Spill
+	sbbl	8(%ecx), %eax
+	movl	%eax, 8(%esp)           # 4-byte Spill
+	movl	16(%esp), %eax          # 4-byte Reload
+	sbbl	12(%ecx), %edi
+	movl	%edi, 12(%esp)          # 4-byte Spill
+	movl	%esi, %edi
+	sbbl	16(%ecx), %edi
+	movl	%edx, %esi
+	sbbl	20(%ecx), %esi
+	movl	%esi, %ebx
+	sarl	$31, %ebx               # ebx = sign mask of d's top limb
+	testl	%ebx, %ebx
+	js	.LBB90_2                # d < 0 -> s < p, keep s
+# BB#1:
+	movl	(%esp), %eax            # 4-byte Reload
+.LBB90_2:
+	movl	60(%esp), %ebx
+	movl	%eax, (%ebx)
+	movl	20(%esp), %ecx          # 4-byte Reload
+	js	.LBB90_4
+# BB#3:
+	movl	4(%esp), %ecx           # 4-byte Reload
+.LBB90_4:
+	movl	%ecx, 4(%ebx)
+	movl	36(%esp), %eax          # 4-byte Reload
+	movl	28(%esp), %edx          # 4-byte Reload
+	movl	24(%esp), %ecx          # 4-byte Reload
+	js	.LBB90_6
+# BB#5:
+	movl	8(%esp), %ecx           # 4-byte Reload
+.LBB90_6:
+	movl	%ecx, 8(%ebx)
+	movl	32(%esp), %ecx          # 4-byte Reload
+	js	.LBB90_8
+# BB#7:
+	movl	12(%esp), %edx          # 4-byte Reload
+.LBB90_8:
+	movl	%edx, 12(%ebx)
+	js	.LBB90_10
+# BB#9:
+	movl	%edi, %ecx
+.LBB90_10:
+	movl	%ecx, 16(%ebx)
+	js	.LBB90_12
+# BB#11:
+	movl	%esi, %eax
+.LBB90_12:
+	movl	%eax, 20(%ebx)
+	addl	$40, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end90:
+	.size	mcl_fp_addNF6L, .Lfunc_end90-mcl_fp_addNF6L
+
+ #-----------------------------------------------------------------------
+ # mcl_fp_sub6L -- 6-limb modular subtraction.
+ # i386 cdecl; after 4 pushes + subl $16 (32 bytes of frame), args at:
+ #   36(%esp) = z, 40(%esp) = x, 44(%esp) = y, 48(%esp) = p (modulus)
+ # Stores the raw difference x - y to z; if the subtraction borrowed
+ # (x < y) it adds the modulus p back into z (the "carry" path).
+ #-----------------------------------------------------------------------
+	.globl	mcl_fp_sub6L
+	.align	16, 0x90
+	.type	mcl_fp_sub6L,@function
+mcl_fp_sub6L:                           # @mcl_fp_sub6L
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$16, %esp
+	movl	40(%esp), %ebx
+	movl	(%ebx), %esi
+	movl	4(%ebx), %edi
+	movl	44(%esp), %ecx
+	subl	(%ecx), %esi            # d = x - y, borrow chain
+	sbbl	4(%ecx), %edi
+	movl	%edi, (%esp)            # 4-byte Spill
+	movl	8(%ebx), %eax
+	sbbl	8(%ecx), %eax
+	movl	%eax, 12(%esp)          # 4-byte Spill
+	movl	12(%ebx), %eax
+	sbbl	12(%ecx), %eax
+	movl	%eax, 4(%esp)           # 4-byte Spill
+	movl	16(%ebx), %ebp
+	sbbl	16(%ecx), %ebp
+	movl	%ebp, 8(%esp)           # 4-byte Spill
+	movl	20(%ebx), %edx
+	sbbl	20(%ecx), %edx
+	movl	$0, %ecx
+	sbbl	$0, %ecx                # ecx = -borrow
+	testb	$1, %cl
+	movl	36(%esp), %ebx
+	movl	%esi, (%ebx)            # store the raw difference first
+	movl	%edi, 4(%ebx)
+	movl	12(%esp), %edi          # 4-byte Reload
+	movl	%edi, 8(%ebx)
+	movl	%eax, 12(%ebx)
+	movl	%ebp, 16(%ebx)
+	movl	%edx, 20(%ebx)
+	je	.LBB91_2                # no borrow -> result already in range
+# BB#1:                                 # %carry
+	movl	48(%esp), %ecx
+	addl	(%ecx), %esi            # borrowed: add the modulus back
+	movl	%esi, (%ebx)
+	movl	(%esp), %eax            # 4-byte Reload
+	adcl	4(%ecx), %eax
+	adcl	8(%ecx), %edi
+	movl	%eax, 4(%ebx)
+	movl	12(%ecx), %eax
+	adcl	4(%esp), %eax           # 4-byte Folded Reload
+	movl	%edi, 8(%ebx)
+	movl	%eax, 12(%ebx)
+	movl	16(%ecx), %eax
+	adcl	8(%esp), %eax           # 4-byte Folded Reload
+	movl	%eax, 16(%ebx)
+	movl	20(%ecx), %eax
+	adcl	%edx, %eax
+	movl	%eax, 20(%ebx)
+.LBB91_2:                               # %nocarry
+	addl	$16, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end91:
+	.size	mcl_fp_sub6L, .Lfunc_end91-mcl_fp_sub6L
+
+ #-----------------------------------------------------------------------
+ # mcl_fp_subNF6L -- 6-limb modular subtraction, branchless variant.
+ # i386 cdecl; after 4 pushes + subl $24 (40 bytes of frame), args at:
+ #   44(%esp) = z, 48(%esp) = x, 52(%esp) = y, 56(%esp) = p (modulus)
+ # Computes d = x - y, builds an all-ones/zero mask from d's sign
+ # (sarl $31), ANDs the mask with each limb of p, and adds the masked
+ # modulus back -- so p is added exactly when the subtraction went
+ # negative, with no conditional branch.
+ #-----------------------------------------------------------------------
+	.globl	mcl_fp_subNF6L
+	.align	16, 0x90
+	.type	mcl_fp_subNF6L,@function
+mcl_fp_subNF6L:                         # @mcl_fp_subNF6L
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$24, %esp
+	movl	48(%esp), %ebx
+	movl	20(%ebx), %esi
+	movl	(%ebx), %ecx
+	movl	4(%ebx), %eax
+	movl	52(%esp), %ebp
+	subl	(%ebp), %ecx            # d = x - y, borrow chain
+	movl	%ecx, 8(%esp)           # 4-byte Spill
+	sbbl	4(%ebp), %eax
+	movl	%eax, 16(%esp)          # 4-byte Spill
+	movl	16(%ebx), %eax
+	movl	12(%ebx), %ecx
+	movl	8(%ebx), %edx
+	sbbl	8(%ebp), %edx
+	movl	%edx, 4(%esp)           # 4-byte Spill
+	sbbl	12(%ebp), %ecx
+	movl	%ecx, 12(%esp)          # 4-byte Spill
+	sbbl	16(%ebp), %eax
+	movl	%eax, 20(%esp)          # 4-byte Spill
+	movl	%esi, %edx
+	sbbl	20(%ebp), %edx
+	movl	%edx, (%esp)            # 4-byte Spill
+	movl	%edx, %ebp
+	sarl	$31, %ebp               # ebp = 0 or -1: sign mask of d
+	movl	%ebp, %ecx
+	addl	%ecx, %ecx              # low mask limbs shifted left one bit
+	movl	%ebp, %eax
+	adcl	%eax, %eax
+	shrl	$31, %edx
+	orl	%ecx, %edx              # reconstruct mask for limb 0
+	movl	56(%esp), %ebx
+	andl	4(%ebx), %eax           # mask & p, limb by limb
+	andl	(%ebx), %edx
+	movl	20(%ebx), %edi
+	andl	%ebp, %edi
+	movl	16(%ebx), %esi
+	andl	%ebp, %esi
+	movl	12(%ebx), %ecx
+	andl	%ebp, %ecx
+	andl	8(%ebx), %ebp
+	addl	8(%esp), %edx           # 4-byte Folded Reload
+	adcl	16(%esp), %eax          # 4-byte Folded Reload
+	movl	44(%esp), %ebx
+	movl	%edx, (%ebx)            # z = d + (mask & p)
+	adcl	4(%esp), %ebp           # 4-byte Folded Reload
+	movl	%eax, 4(%ebx)
+	adcl	12(%esp), %ecx          # 4-byte Folded Reload
+	movl	%ebp, 8(%ebx)
+	adcl	20(%esp), %esi          # 4-byte Folded Reload
+	movl	%ecx, 12(%ebx)
+	movl	%esi, 16(%ebx)
+	adcl	(%esp), %edi            # 4-byte Folded Reload
+	movl	%edi, 20(%ebx)
+	addl	$24, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end92:
+	.size	mcl_fp_subNF6L, .Lfunc_end92-mcl_fp_subNF6L
+
+ #-----------------------------------------------------------------------
+ # mcl_fpDbl_add6L -- double-width (12-limb) addition with the high half
+ # reduced modulo p.
+ # i386 cdecl; after 4 pushes + subl $36 (52 bytes of frame), args at:
+ #   56(%esp) = z, 60(%esp) = x, 64(%esp) = y, 68(%esp) = p (modulus)
+ # The low 6 limbs of x+y are stored to z directly; the high 6 limbs are
+ # trial-subtracted against p and the smaller value is selected limb by
+ # limb, so z's high half is x_hi + y_hi (mod p, one conditional step).
+ #-----------------------------------------------------------------------
+	.globl	mcl_fpDbl_add6L
+	.align	16, 0x90
+	.type	mcl_fpDbl_add6L,@function
+mcl_fpDbl_add6L:                        # @mcl_fpDbl_add6L
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$36, %esp
+	movl	64(%esp), %edx
+	movl	60(%esp), %ecx
+	movl	12(%ecx), %esi
+	movl	16(%ecx), %eax
+	movl	8(%edx), %edi
+	movl	(%edx), %ebx
+	addl	(%ecx), %ebx            # low half: add and store immediately
+	movl	56(%esp), %ebp
+	movl	%ebx, (%ebp)
+	movl	4(%edx), %ebx
+	adcl	4(%ecx), %ebx
+	adcl	8(%ecx), %edi
+	adcl	12(%edx), %esi
+	adcl	16(%edx), %eax
+	movl	%ebx, 4(%ebp)
+	movl	%edx, %ebx
+	movl	32(%ebx), %edx
+	movl	%edx, 32(%esp)          # 4-byte Spill
+	movl	%edi, 8(%ebp)
+	movl	20(%ebx), %edi
+	movl	%esi, 12(%ebp)
+	movl	20(%ecx), %esi
+	adcl	%edi, %esi              # high half begins at limb 5's carry
+	movl	24(%ebx), %edi
+	movl	%eax, 16(%ebp)
+	movl	24(%ecx), %edx
+	adcl	%edi, %edx
+	movl	%edx, 20(%esp)          # 4-byte Spill
+	movl	28(%ebx), %edi
+	movl	%esi, 20(%ebp)
+	movl	28(%ecx), %eax
+	adcl	%edi, %eax
+	movl	%eax, 16(%esp)          # 4-byte Spill
+	movl	32(%ecx), %ebp
+	adcl	32(%esp), %ebp          # 4-byte Folded Reload
+	movl	%ebp, 24(%esp)          # 4-byte Spill
+	movl	36(%ebx), %esi
+	movl	%ebx, %edi
+	movl	36(%ecx), %ebx
+	adcl	%esi, %ebx
+	movl	%ebx, 28(%esp)          # 4-byte Spill
+	movl	40(%edi), %esi
+	movl	40(%ecx), %edi
+	adcl	%esi, %edi
+	movl	%edi, 32(%esp)          # 4-byte Spill
+	movl	64(%esp), %esi
+	movl	44(%esi), %esi
+	movl	44(%ecx), %ecx
+	adcl	%esi, %ecx
+	movl	%ecx, 12(%esp)          # 4-byte Spill
+	sbbl	%ecx, %ecx
+	andl	$1, %ecx                # ecx = carry-out of the 12-limb add
+	movl	68(%esp), %esi
+	subl	(%esi), %edx            # trial subtract: high half - p
+	movl	%edx, 4(%esp)           # 4-byte Spill
+	movl	68(%esp), %edx
+	sbbl	4(%edx), %eax
+	movl	%eax, (%esp)            # 4-byte Spill
+	sbbl	8(%edx), %ebp
+	movl	%ebp, 8(%esp)           # 4-byte Spill
+	movl	%ebx, %ebp
+	sbbl	12(%edx), %ebp
+	movl	%edi, %ebx
+	movl	12(%esp), %edi          # 4-byte Reload
+	sbbl	16(%edx), %ebx
+	movl	%edi, %eax
+	sbbl	20(%edx), %eax
+	sbbl	$0, %ecx                # fold the add's carry into the borrow
+	andl	$1, %ecx
+	jne	.LBB93_2                # borrowed -> keep the unreduced sum
+# BB#1:
+	movl	%eax, %edi
+.LBB93_2:
+	testb	%cl, %cl
+	movl	20(%esp), %ecx          # 4-byte Reload
+	movl	16(%esp), %edx          # 4-byte Reload
+	jne	.LBB93_4
+# BB#3:
+	movl	(%esp), %edx            # 4-byte Reload
+	movl	4(%esp), %ecx           # 4-byte Reload
+.LBB93_4:
+	movl	56(%esp), %eax
+	movl	%ecx, 24(%eax)          # store the selected high half
+	movl	%edx, 28(%eax)
+	movl	32(%esp), %ecx          # 4-byte Reload
+	movl	24(%esp), %edx          # 4-byte Reload
+	jne	.LBB93_6
+# BB#5:
+	movl	8(%esp), %edx           # 4-byte Reload
+.LBB93_6:
+	movl	%edx, 32(%eax)
+	movl	28(%esp), %edx          # 4-byte Reload
+	jne	.LBB93_8
+# BB#7:
+	movl	%ebp, %edx
+.LBB93_8:
+	movl	%edx, 36(%eax)
+	jne	.LBB93_10
+# BB#9:
+	movl	%ebx, %ecx
+.LBB93_10:
+	movl	%ecx, 40(%eax)
+	movl	%edi, 44(%eax)
+	addl	$36, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end93:
+	.size	mcl_fpDbl_add6L, .Lfunc_end93-mcl_fpDbl_add6L
+
+ #-----------------------------------------------------------------------
+ # mcl_fpDbl_sub6L -- double-width (12-limb) subtraction with the high
+ # half corrected modulo p.
+ # i386 cdecl; after 4 pushes + subl $24 (40 bytes of frame), args at:
+ #   44(%esp) = z, 48(%esp) = x, 52(%esp) = y, 56(%esp) = p (modulus)
+ # The low 6 limbs of x-y are stored directly; if the full 12-limb
+ # subtraction borrowed, each limb of p (else 0) is selected via branches
+ # and added into the high half -- equivalent to a masked add of p.
+ #-----------------------------------------------------------------------
+	.globl	mcl_fpDbl_sub6L
+	.align	16, 0x90
+	.type	mcl_fpDbl_sub6L,@function
+mcl_fpDbl_sub6L:                        # @mcl_fpDbl_sub6L
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$24, %esp
+	movl	48(%esp), %edx
+	movl	(%edx), %eax
+	movl	4(%edx), %edi
+	movl	52(%esp), %esi
+	subl	(%esi), %eax            # low half: subtract and store
+	sbbl	4(%esi), %edi
+	movl	8(%edx), %ebx
+	sbbl	8(%esi), %ebx
+	movl	44(%esp), %ecx
+	movl	%eax, (%ecx)
+	movl	12(%edx), %eax
+	sbbl	12(%esi), %eax
+	movl	%edi, 4(%ecx)
+	movl	16(%edx), %edi
+	sbbl	16(%esi), %edi
+	movl	%ebx, 8(%ecx)
+	movl	20(%esi), %ebx
+	movl	%eax, 12(%ecx)
+	movl	20(%edx), %eax
+	sbbl	%ebx, %eax
+	movl	24(%esi), %ebx
+	movl	%edi, 16(%ecx)
+	movl	24(%edx), %edi
+	sbbl	%ebx, %edi              # high half: differences spilled
+	movl	%edi, 8(%esp)           # 4-byte Spill
+	movl	28(%esi), %edi
+	movl	%eax, 20(%ecx)
+	movl	28(%edx), %eax
+	sbbl	%edi, %eax
+	movl	%eax, (%esp)            # 4-byte Spill
+	movl	32(%esi), %edi
+	movl	32(%edx), %eax
+	sbbl	%edi, %eax
+	movl	%eax, 4(%esp)           # 4-byte Spill
+	movl	36(%esi), %edi
+	movl	36(%edx), %eax
+	sbbl	%edi, %eax
+	movl	%eax, 12(%esp)          # 4-byte Spill
+	movl	40(%esi), %edi
+	movl	40(%edx), %eax
+	sbbl	%edi, %eax
+	movl	%eax, 16(%esp)          # 4-byte Spill
+	movl	44(%esi), %esi
+	movl	44(%edx), %eax
+	sbbl	%esi, %eax
+	movl	%eax, 20(%esp)          # 4-byte Spill
+	movl	$0, %ebx
+	sbbl	$0, %ebx
+	andl	$1, %ebx                # ebx = 1 -> overall borrow, need +p
+	movl	56(%esp), %eax
+ # pick p[limb] when borrowed, else 0, for each high limb
+	jne	.LBB94_1
+# BB#2:
+	xorl	%edx, %edx
+	jmp	.LBB94_3
+.LBB94_1:
+	movl	20(%eax), %edx
+.LBB94_3:
+	testb	%bl, %bl
+	jne	.LBB94_4
+# BB#5:
+	movl	$0, %esi
+	movl	$0, %edi
+	jmp	.LBB94_6
+.LBB94_4:
+	movl	(%eax), %edi
+	movl	4(%eax), %esi
+.LBB94_6:
+	jne	.LBB94_7
+# BB#8:
+	movl	$0, %ebx
+	jmp	.LBB94_9
+.LBB94_7:
+	movl	16(%eax), %ebx
+.LBB94_9:
+	jne	.LBB94_10
+# BB#11:
+	movl	$0, %ebp
+	jmp	.LBB94_12
+.LBB94_10:
+	movl	12(%eax), %ebp
+.LBB94_12:
+	jne	.LBB94_13
+# BB#14:
+	xorl	%eax, %eax
+	jmp	.LBB94_15
+.LBB94_13:
+	movl	8(%eax), %eax
+.LBB94_15:
+	addl	8(%esp), %edi           # 4-byte Folded Reload
+	adcl	(%esp), %esi            # 4-byte Folded Reload
+	movl	%edi, 24(%ecx)          # z_hi = diff + (borrow ? p : 0)
+	adcl	4(%esp), %eax           # 4-byte Folded Reload
+	movl	%esi, 28(%ecx)
+	adcl	12(%esp), %ebp          # 4-byte Folded Reload
+	movl	%eax, 32(%ecx)
+	adcl	16(%esp), %ebx          # 4-byte Folded Reload
+	movl	%ebp, 36(%ecx)
+	movl	%ebx, 40(%ecx)
+	adcl	20(%esp), %edx          # 4-byte Folded Reload
+	movl	%edx, 44(%ecx)
+	addl	$24, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end94:
+	.size	mcl_fpDbl_sub6L, .Lfunc_end94-mcl_fpDbl_sub6L
+
+	.globl	mcl_fp_mulUnitPre7L
+	.align	16, 0x90
+	.type	mcl_fp_mulUnitPre7L,@function
+#------------------------------------------------------------------------------
+# mcl_fp_mulUnitPre7L — multiply a 7-limb number by a single 32-bit word.
+#
+# ABI: i386 cdecl.  After the 4 pushes and the 36-byte frame:
+#   56(%esp) = z  (result, 8 x 32-bit limbs)
+#   60(%esp) = a  (source, 7 limbs)
+#   64(%esp) = b  (32-bit multiplier)
+#
+# Each a[i]*b product is formed with one-operand mull (result in edx:eax);
+# all seven low/high halves are computed first (spilled or parked in
+# registers), then combined with a single addl/adcl carry chain so that
+# z[i] = low(a[i]*b) + high(a[i-1]*b) + carry, and z[7] receives the final
+# high word plus carry.  Instruction order in the adcl chain is
+# significant (carry is held in CF throughout).
+#------------------------------------------------------------------------------
+mcl_fp_mulUnitPre7L: # @mcl_fp_mulUnitPre7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $36, %esp
+ # esi = multiplier b, ebx = source a; compute a[6]..a[0] times b.
+ movl 64(%esp), %esi
+ movl 60(%esp), %ebx
+ movl %esi, %eax
+ mull 24(%ebx)
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 20(%ebx)
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 16(%ebx)
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 12(%ebx)
+ movl %edx, %ebp
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 8(%ebx)
+ movl %edx, %ecx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 4(%ebx)
+ movl %edx, %edi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull (%ebx)
+ # esi = result z; fold the partial products into z[0..7] with one
+ # continuous carry chain.
+ movl 56(%esp), %esi
+ movl %eax, (%esi)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%esi)
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esi)
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esi)
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esi)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%esi)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%esi)
+ # z[7] = high word of a[6]*b plus the last carry.
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 28(%esi)
+ addl $36, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end95:
+ .size mcl_fp_mulUnitPre7L, .Lfunc_end95-mcl_fp_mulUnitPre7L
+
+	.globl	mcl_fpDbl_mulPre7L
+	.align	16, 0x90
+	.type	mcl_fpDbl_mulPre7L,@function
+#------------------------------------------------------------------------------
+# mcl_fpDbl_mulPre7L — schoolbook multiplication of two 7-limb numbers,
+# producing a 14-limb double-width result (no modular reduction).
+#
+# ABI: i386 cdecl.  After the 4 pushes and the 100-byte frame:
+#   120(%esp) = z  (result, 14 x 32-bit limbs)
+#   124(%esp) = a  (first operand, 7 limbs)
+#   128(%esp) = b  (second operand, 7 limbs)
+#
+# Structure: seven column passes, one per limb b[j] (j = 0..6).  Each pass
+# computes the seven products a[i]*b[j] with mull (edx:eax), folds the low
+# halves into the running partial sums from the previous column with an
+# addl/adcl carry chain, emits the lowest limb to z[j], then folds the
+# high halves in.  The sbbl %reg,%reg / andl $1 pairs materialize the
+# final carry of a chain as 0/1.  Register pressure forces heavy spilling
+# to the local frame ("# 4-byte Spill/Reload" comments are compiler-
+# emitted).  Carry flow through CF makes instruction order significant.
+#------------------------------------------------------------------------------
+mcl_fpDbl_mulPre7L: # @mcl_fpDbl_mulPre7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+ # Columns 0 and 1 interleaved: products by b[0] (edi) and b[1] (ebp).
+ movl 124(%esp), %ebx
+ movl (%ebx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx
+ movl (%ecx), %edi
+ movl %ecx, %ebp
+ mull %edi
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 4(%ebx), %ecx
+ movl 8(%ebx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 12(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 16(%ebx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 20(%ebx), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 24(%ebx), %ebx
+ movl %ebx, 80(%esp) # 4-byte Spill
+ movl 4(%ebp), %ebp
+ movl %ecx, %eax
+ mull %ebp
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ mull %ebp
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ebp
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebp
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 4(%esp), %ebx # 4-byte Reload
+ movl %ebx, %eax
+ mull %ebp
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 8(%esp), %esi # 4-byte Reload
+ movl %esi, %eax
+ mull %ebp
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, %eax
+ mull %ebp
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ mull %edi
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %edi
+ movl %edx, %ebx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %edi
+ movl %edx, %ebp
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, %ecx
+ movl 24(%esp), %esi # 4-byte Reload
+ addl 96(%esp), %esi # 4-byte Folded Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebx, %edi
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl 80(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ addl 52(%esp), %esi # 4-byte Folded Reload
+ # z[1] done; continue folding the b[1] partials.
+ movl 120(%esp), %eax
+ movl %esi, 4(%eax)
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, %ecx
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %esi
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 96(%esp), %ebp # 4-byte Reload
+ addl 84(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 96(%esp) # 4-byte Spill
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 80(%esp) # 4-byte Spill
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ # Column 2: products a[i] * b[2] (edi = b[2]).
+ movl 124(%esp), %esi
+ movl 24(%esi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 128(%esp), %eax
+ movl 8(%eax), %edi
+ movl %ecx, %eax
+ mull %edi
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ mull %edi
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ mull %edi
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ mull %edi
+ movl %eax, %ebp
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ mull %edi
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl (%esi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 4(%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ mull %edi
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 28(%esp) # 4-byte Spill
+ addl 96(%esp), %eax # 4-byte Folded Reload
+ movl 120(%esp), %edx
+ movl %eax, 8(%edx)
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 88(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 92(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 84(%esp) # 4-byte Folded Spill
+ sbbl %edi, %edi
+ # Column 3: products a[i] * b[3] (ecx = b[3]).
+ movl 128(%esp), %eax
+ movl 12(%eax), %ecx
+ movl 16(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 8(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl (%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 4(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %edx, 36(%esp) # 4-byte Spill
+ andl $1, %edi
+ addl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ addl 4(%esp), %ebx # 4-byte Folded Reload
+ movl 120(%esp), %ebp
+ movl %ebx, 12(%ebp)
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ adcl (%esp), %esi # 4-byte Folded Reload
+ movl %esi, %ebx
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %esi
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %edx
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%esp) # 4-byte Spill
+ # Column 4: products a[i] * b[4] (ecx = b[4]).
+ movl 124(%esp), %ebx
+ movl 24(%ebx), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 128(%esp), %eax
+ movl 16(%eax), %ecx
+ movl %edx, %eax
+ mull %ecx
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 20(%ebx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 16(%ebx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 12(%ebx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 8(%ebx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl (%ebx), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 4(%ebx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ mull %ecx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %edi, %eax
+ mull %ecx
+ movl %edx, (%esp) # 4-byte Spill
+ addl %ebp, %eax
+ movl 120(%esp), %ecx
+ movl %eax, 16(%ecx)
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %edi
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 88(%esp), %esi # 4-byte Folded Reload
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ addl (%esp), %edi # 4-byte Folded Reload
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 64(%esp) # 4-byte Spill
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 92(%esp) # 4-byte Spill
+ # Column 5: products a[i] * b[5] (esi = b[5]).
+ movl 128(%esp), %eax
+ movl 20(%eax), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 40(%esp), %eax # 4-byte Reload
+ mull %esi
+ movl %edx, 40(%esp) # 4-byte Spill
+ addl %edi, %eax
+ movl 120(%esp), %edx
+ movl %eax, 20(%edx)
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 84(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 88(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 96(%esp) # 4-byte Folded Spill
+ adcl 92(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 92(%esp) # 4-byte Spill
+ # Column 6 (final): products a[i] * b[6] (ecx = b[6]); emit z[6..13].
+ movl 128(%esp), %eax
+ movl 24(%eax), %ecx
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl 124(%esp), %edi
+ mull 24(%edi)
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 20(%edi)
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 16(%edi)
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 12(%edi)
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 8(%edi)
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 4(%edi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull (%edi)
+ movl %eax, (%esp) # 4-byte Spill
+ movl %edx, 12(%esp) # 4-byte Spill
+ andl $1, %esi
+ addl 40(%esp), %ebx # 4-byte Folded Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 44(%esp), %edx # 4-byte Folded Reload
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ addl (%esp), %ebx # 4-byte Folded Reload
+ movl 120(%esp), %ecx
+ movl %ebx, 24(%ecx)
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %ebx
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %edx
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ sbbl %eax, %eax
+ andl $1, %eax
+ addl 12(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 28(%ecx)
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, 32(%ecx)
+ movl 96(%esp), %ebx # 4-byte Reload
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %edx, 36(%ecx)
+ movl %edi, %edx
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, 40(%ecx)
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 44(%ecx)
+ movl %esi, 48(%ecx)
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ecx)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end96:
+ .size mcl_fpDbl_mulPre7L, .Lfunc_end96-mcl_fpDbl_mulPre7L
+
+	.globl	mcl_fpDbl_sqrPre7L
+	.align	16, 0x90
+	.type	mcl_fpDbl_sqrPre7L,@function
+#------------------------------------------------------------------------------
+# mcl_fpDbl_sqrPre7L — squaring of a 7-limb number into a 14-limb
+# double-width result (no modular reduction).
+#
+# ABI: i386 cdecl.  After the 4 pushes and the 100-byte frame:
+#   120(%esp) = z  (result, 14 x 32-bit limbs)
+#   124(%esp) = a  (source, 7 limbs; both multiplicand and multiplier)
+#
+# Same column-by-column schoolbook structure as mcl_fpDbl_mulPre7L, but
+# both operands come from the single source pointer: pass j multiplies
+# every limb a[i] by a[j] (including the diagonal a[j]*a[j] via
+# "mull %reg" on itself), folds the partial products into the running
+# sums with addl/adcl carry chains, and emits z[j].  sbbl/andl $1 pairs
+# capture chain carries as 0/1; heavy spilling to the local frame
+# throughout.  CF carries between adjacent instructions, so ordering is
+# significant — do not reorder.
+#------------------------------------------------------------------------------
+mcl_fpDbl_sqrPre7L: # @mcl_fpDbl_sqrPre7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+ # Columns 0 and 1 interleaved: products by a[0] (ebx) and a[1] (edi).
+ movl 124(%esp), %esi
+ movl 24(%esi), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl (%esi), %ebx
+ movl 4(%esi), %edi
+ mull %edi
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ mull %edi
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 16(%esi), %ecx
+ movl %ecx, %eax
+ mull %edi
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 12(%esi), %esi
+ movl %esi, %eax
+ mull %edi
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl 8(%eax), %ebp
+ movl %ebp, %eax
+ mull %edi
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %ebx
+ movl %edx, %ebp
+ movl %eax, %ecx
+ # Diagonal a[1]*a[1] and cross term a[1]*a[0].
+ movl %edi, %eax
+ mull %edi
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %ebx
+ movl %edx, %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull %ebx
+ movl 120(%esp), %ebx
+ movl %eax, (%ebx)
+ addl %edi, %edx
+ adcl %esi, %ecx
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %esi
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl %eax, 92(%esp) # 4-byte Folded Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 96(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl %edi, %edx
+ movl %edx, 4(%ebx)
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebx
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %edi
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %esi
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ addl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 28(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 72(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 84(%esp) # 4-byte Spill
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebp # 4-byte Folded Reload
+ # Column 2: products a[i] * a[2] (esi = a[2]).
+ movl 124(%esp), %edi
+ movl 24(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 8(%edi), %esi
+ mull %esi
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 20(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ mull %esi
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 16(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ mull %esi
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 12(%edi), %ebx
+ movl %ebx, %eax
+ mull %esi
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl (%edi), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 4(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ mull %esi
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %esi
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl %eax, %edi
+ # Diagonal a[2]*a[2].
+ movl %esi, %eax
+ mull %esi
+ movl %eax, %ecx
+ movl %edx, 4(%esp) # 4-byte Spill
+ addl 28(%esp), %edi # 4-byte Folded Reload
+ movl 120(%esp), %eax
+ movl %edi, 8(%eax)
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ adcl 92(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 84(%esp), %esi # 4-byte Folded Reload
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 88(%esp), %edi # 4-byte Folded Reload
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %ebp, %eax
+ sbbl %ebp, %ebp
+ andl $1, %ebp
+ addl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 12(%esp), %edx # 4-byte Reload
+ adcl %edx, 56(%esp) # 4-byte Folded Spill
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 76(%esp) # 4-byte Spill
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ # Column 3: products a[i] * a[3] (ebx = a[3]).
+ movl 52(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl 24(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl 20(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %edi
+ # Diagonal a[3]*a[3].
+ movl %ebx, %eax
+ mull %ebx
+ movl %eax, %ebx
+ movl %edx, 16(%esp) # 4-byte Spill
+ addl 68(%esp), %edi # 4-byte Folded Reload
+ movl 120(%esp), %eax
+ movl %edi, 12(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 96(%esp) # 4-byte Spill
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 88(%esp) # 4-byte Folded Spill
+ adcl %ebp, 92(%esp) # 4-byte Folded Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ # Column 4: products a[i] * a[4] (ebp = a[4]); a[5] cached in ebx.
+ movl 124(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %edi
+ movl 20(%eax), %ebx
+ movl %edi, %eax
+ mull %ebx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebx
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl 16(%eax), %ebp
+ movl %edi, %eax
+ mull %ebp
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ebp
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ addl %eax, 56(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 60(%esp) # 4-byte Folded Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl %eax, 88(%esp) # 4-byte Folded Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 92(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 124(%esp), %esi
+ movl 24(%esi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ mull %ebp
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ebp
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ mull %ebp
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ mull %ebp
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl %eax, %edi
+ # Diagonal a[4]*a[4].
+ movl %ebp, %eax
+ mull %ebp
+ movl %eax, %esi
+ movl %edx, (%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ addl 4(%esp), %ebp # 4-byte Folded Reload
+ movl 120(%esp), %eax
+ movl %ebp, 16(%eax)
+ movl %ecx, %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %edi, %ebp
+ adcl 96(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ adcl 88(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ sbbl %edi, %edi
+ andl $1, %edi
+ addl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl (%esp), %esi # 4-byte Folded Reload
+ movl %esi, 92(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ # Column 5: products a[i] * a[5] (ebx = a[5]).
+ movl 32(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ mull %ebx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ # Diagonal a[5]*a[5].
+ movl %ebx, %eax
+ mull %ebx
+ movl %eax, %esi
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ addl 40(%esp), %eax # 4-byte Folded Reload
+ movl 120(%esp), %edx
+ movl %eax, 20(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, %edx
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl 84(%esp), %ebx # 4-byte Reload
+ adcl %ebp, %ebx
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl %edi, %ebp
+ sbbl %edi, %edi
+ andl $1, %edi
+ addl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 84(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 64(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 88(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ # Column 6 (final): products a[i] * a[6] (ecx = a[6]); emit z[6..13].
+ movl 124(%esp), %esi
+ movl 24(%esi), %ecx
+ movl %ecx, %eax
+ mull 20(%esi)
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 16(%esi)
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 12(%esi)
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 8(%esi)
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull 4(%esi)
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull (%esi)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, %esi
+ # Diagonal a[6]*a[6].
+ movl %ecx, %eax
+ mull %ecx
+ movl %edx, 52(%esp) # 4-byte Spill
+ addl 80(%esp), %esi # 4-byte Folded Reload
+ movl 120(%esp), %edx
+ movl %esi, 24(%edx)
+ movl %edx, %esi
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ adcl 84(%esp), %ebp # 4-byte Folded Reload
+ adcl 92(%esp), %ebx # 4-byte Folded Reload
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl %ecx, 96(%esp) # 4-byte Folded Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+ addl 36(%esp), %edi # 4-byte Folded Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, 28(%esi)
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 32(%esi)
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 36(%esi)
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edi, 40(%esi)
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 44(%esi)
+ movl %eax, 48(%esi)
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end97:
+ .size mcl_fpDbl_sqrPre7L, .Lfunc_end97-mcl_fpDbl_sqrPre7L
+
+ .globl mcl_fp_mont7L
+ .align 16, 0x90
+ .type mcl_fp_mont7L,@function
+mcl_fp_mont7L: # @mcl_fp_mont7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $152, %esp
+ movl 176(%esp), %esi
+ movl (%esi), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 180(%esp), %edx
+ movl (%edx), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ mull %ecx
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 184(%esp), %ecx
+ movl -4(%ecx), %edx
+ movl %edx, 132(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ imull %edx, %ebx
+ movl (%ecx), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 24(%ecx), %edx
+ movl %edx, 120(%esp) # 4-byte Spill
+ movl 20(%ecx), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 16(%ecx), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 12(%ecx), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 8(%ecx), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 4(%ecx), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 4(%esi), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl %esi, %eax
+ movl 24(%eax), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 20(%eax), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 16(%eax), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 12(%eax), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 8(%eax), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl 72(%esp), %ecx # 4-byte Reload
+ mull %ecx
+ movl %edx, %esi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull %ecx
+ movl %edx, %ebp
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %ecx
+ movl %edx, %edi
+ movl %eax, (%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, %ebx
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ mull %ecx
+ addl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 8(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ adcl (%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, (%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 84(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ addl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl 28(%esp), %ebp # 4-byte Reload
+ addl 80(%esp), %ebp # 4-byte Folded Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl %ebp, 88(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl (%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 72(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 64(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl 4(%eax), %ecx
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ addl %edi, %ebx
+ adcl 84(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 84(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 48(%esp) # 4-byte Spill
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 68(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl 88(%esp), %ecx # 4-byte Reload
+ imull 132(%esp), %ecx # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %eax, %ebp
+ addl %ebx, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl 88(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %ebp # 4-byte Reload
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ adcl 84(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 52(%esp) # 4-byte Spill
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 72(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 180(%esp), %eax
+ movl 8(%eax), %ebx
+ movl %ebx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ addl %edi, %edx
+ movl %edx, %edi
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 72(%esp), %esi # 4-byte Reload
+ addl %ebp, %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl %edi, 44(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl %edi, 48(%esp) # 4-byte Folded Spill
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 84(%esp) # 4-byte Spill
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %esi, %ecx
+ imull 132(%esp), %ecx # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %eax, %ebp
+ addl %ebx, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 72(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 88(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 180(%esp), %eax
+ movl 12(%eax), %ebx
+ movl %ebx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ addl %edi, %edx
+ movl %edx, %edi
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 72(%esp), %esi # 4-byte Reload
+ addl %ebp, %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl %edi, 48(%esp) # 4-byte Folded Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl %edi, 52(%esp) # 4-byte Folded Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 84(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %esi, %ecx
+ imull 132(%esp), %ecx # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %eax, %ebp
+ addl %ebx, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 72(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 88(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 180(%esp), %eax
+ movl 16(%eax), %ebx
+ movl %ebx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ addl %edi, %edx
+ movl %edx, %edi
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 72(%esp), %esi # 4-byte Reload
+ addl %ebp, %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl %edi, 48(%esp) # 4-byte Folded Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl %edi, 52(%esp) # 4-byte Folded Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 84(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %esi, %ecx
+ imull 132(%esp), %ecx # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %eax, %ebp
+ addl %ebx, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 72(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %ebp # 4-byte Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 88(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 180(%esp), %eax
+ movl 20(%eax), %ebx
+ movl %ebx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %edi
+ movl %ebx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ addl %edi, %edx
+ movl %edx, %edi
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %ebx # 4-byte Reload
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 72(%esp), %esi # 4-byte Reload
+ addl %ebp, %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl %edi, 48(%esp) # 4-byte Folded Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl %edi, 52(%esp) # 4-byte Folded Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 84(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %esi, %ecx
+ imull 132(%esp), %ecx # 4-byte Folded Reload
+ andl $1, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %eax, %ebx
+ addl %ebp, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 72(%esp), %ebx # 4-byte Folded Reload
+ movl 20(%esp), %ebx # 4-byte Reload
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 88(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 180(%esp), %eax
+ movl 24(%eax), %ebp
+ movl %ebp, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ addl %edi, %edx
+ movl %edx, %edi
+ adcl 112(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 112(%esp) # 4-byte Spill
+ adcl 92(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 96(%esp), %ecx # 4-byte Folded Reload
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 116(%esp), %esi # 4-byte Reload
+ addl %ebx, %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl %edi, 112(%esp) # 4-byte Folded Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl %edi, 104(%esp) # 4-byte Folded Spill
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 100(%esp) # 4-byte Spill
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl 132(%esp), %ecx # 4-byte Reload
+ imull %esi, %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ andl $1, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %edi
+ movl 132(%esp), %eax # 4-byte Reload
+ mull 124(%esp) # 4-byte Folded Reload
+ addl 56(%esp), %eax # 4-byte Folded Reload
+ adcl %edi, %edx
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ adcl 72(%esp), %ebp # 4-byte Folded Reload
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 132(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl 64(%esp), %esi # 4-byte Reload
+ addl 116(%esp), %esi # 4-byte Folded Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ adcl 112(%esp), %edx # 4-byte Folded Reload
+ adcl 104(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 116(%esp) # 4-byte Spill
+ adcl 100(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %esi # 4-byte Reload
+ adcl 88(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 132(%esp) # 4-byte Spill
+ adcl 84(%esp), %edi # 4-byte Folded Reload
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ movl %eax, %esi
+ subl 128(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl %edx, %esi
+ sbbl 124(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 124(%esp) # 4-byte Spill
+ sbbl 136(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 128(%esp) # 4-byte Spill
+ sbbl 140(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 136(%esp) # 4-byte Spill
+ sbbl 144(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 140(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ movl %ecx, %ebx
+ movl %ecx, %ebp
+ sbbl 148(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 144(%esp) # 4-byte Spill
+ movl %edi, %ebx
+ sbbl 120(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 148(%esp) # 4-byte Spill
+ movl 96(%esp), %ebx # 4-byte Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB98_2
+# BB#1:
+ movl 108(%esp), %eax # 4-byte Reload
+.LBB98_2:
+ movl 172(%esp), %esi
+ movl %eax, (%esi)
+ testb %bl, %bl
+ jne .LBB98_4
+# BB#3:
+ movl 124(%esp), %edx # 4-byte Reload
+.LBB98_4:
+ movl %edx, 4(%esi)
+ movl 104(%esp), %ecx # 4-byte Reload
+ jne .LBB98_6
+# BB#5:
+ movl 128(%esp), %ecx # 4-byte Reload
+.LBB98_6:
+ movl %ecx, 8(%esi)
+ movl 112(%esp), %ecx # 4-byte Reload
+ movl 116(%esp), %eax # 4-byte Reload
+ jne .LBB98_8
+# BB#7:
+ movl 136(%esp), %eax # 4-byte Reload
+.LBB98_8:
+ movl %eax, 12(%esi)
+ jne .LBB98_10
+# BB#9:
+ movl 140(%esp), %ecx # 4-byte Reload
+.LBB98_10:
+ movl %ecx, 16(%esi)
+ jne .LBB98_12
+# BB#11:
+ movl 144(%esp), %ebp # 4-byte Reload
+.LBB98_12:
+ movl %ebp, 20(%esi)
+ jne .LBB98_14
+# BB#13:
+ movl 148(%esp), %edi # 4-byte Reload
+.LBB98_14:
+ movl %edi, 24(%esi)
+ addl $152, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end98:
+ .size mcl_fp_mont7L, .Lfunc_end98-mcl_fp_mont7L
+
+ .globl mcl_fp_montNF7L
+ .align 16, 0x90
+ .type mcl_fp_montNF7L,@function
+mcl_fp_montNF7L: # @mcl_fp_montNF7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $152, %esp
+ movl 176(%esp), %ebp
+ movl (%ebp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 180(%esp), %ecx
+ movl (%ecx), %ecx
+ mull %ecx
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 184(%esp), %esi
+ movl -4(%esi), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl %eax, %edi
+ imull %edx, %edi
+ movl (%esi), %edx
+ movl %edx, 148(%esp) # 4-byte Spill
+ movl 24(%esi), %edx
+ movl %edx, 124(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 4(%esi), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 4(%ebp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 24(%ebp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 20(%ebp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 16(%ebp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 12(%ebp), %ebx
+ movl %ebx, 96(%esp) # 4-byte Spill
+ movl 8(%ebp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 132(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull %ecx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull %ecx
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull %ecx
+ movl %edx, %ebp
+ movl %eax, (%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ mull %ecx
+ movl %eax, %edi
+ addl 84(%esp), %edi # 4-byte Folded Reload
+ adcl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ adcl %ebx, %ebp
+ movl %esi, %edx
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 16(%esp), %ebx # 4-byte Reload
+ addl 88(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, %ebx
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ adcl $0, %eax
+ addl 28(%esp), %ebx # 4-byte Folded Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl 4(%eax), %ecx
+ movl %ecx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ addl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ addl %ebx, 48(%esp) # 4-byte Folded Spill
+ adcl %edi, 52(%esp) # 4-byte Folded Spill
+ movl %ebp, %edi
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 56(%esp) # 4-byte Folded Spill
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, %ecx
+ imull 108(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 132(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ecx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ addl %ebp, %eax
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ adcl %edi, %esi
+ movl %esi, %edi
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl 8(%eax), %ebp
+ movl %ebp, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ addl %ebx, %ebp
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebx
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl %edi, %esi
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 44(%esp), %edi # 4-byte Reload
+ addl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 80(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, %ebx
+ imull 108(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 132(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %ebx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ addl %edi, %eax
+ adcl %ebp, %ecx
+ movl %ecx, %esi
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %ebx # 4-byte Reload
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 72(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 76(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl 12(%eax), %edi
+ movl %edi, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebp
+ movl %edi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ addl %ebp, %ebx
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %edi
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 44(%esp), %esi # 4-byte Reload
+ addl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 72(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, %ebp
+ imull 108(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 132(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %ebp, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ addl %edi, %eax
+ adcl %ebx, %esi
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %ebx # 4-byte Reload
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 72(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 76(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl 16(%eax), %ebp
+ movl %ebp, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 24(%esp) # 4-byte Spill
+ addl %ebx, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl %edi, %esi
+ movl %esi, %edi
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 24(%esp), %ecx # 4-byte Reload
+ addl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 72(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ imull 108(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %esi, %eax
+ mull 132(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl %esi, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ addl %ecx, %eax
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl %edi, %edx
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 48(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl 20(%eax), %ebp
+ movl %ebp, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ebp, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ addl %ebx, %ebp
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %ebx
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl %edi, %edx
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl 52(%esp), %edi # 4-byte Reload
+ addl 44(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %esi
+ adcl 88(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ movl %edi, %ebx
+ imull 108(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 132(%esp) # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %ebx, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ addl %edi, %eax
+ adcl %ebp, %ecx
+ movl %ecx, %edx
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl 76(%esp), %ebx # 4-byte Reload
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl %esi, %edi
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ addl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 72(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 76(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 80(%esp) # 4-byte Spill
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl 24(%eax), %edi
+ movl %edi, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 116(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %edi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %ebp
+ movl %edi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %eax, %ebx
+ addl %ebp, %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ adcl %esi, %edi
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx # 4-byte Folded Reload
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ addl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 68(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 104(%esp) # 4-byte Folded Spill
+ adcl 72(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 76(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 100(%esp) # 4-byte Spill
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ movl 48(%esp), %ecx # 4-byte Reload
+ imull %ecx, %edi
+ movl %edi, %eax
+ mull 124(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 144(%esp) # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 140(%esp) # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 136(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 148(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl %edi, %eax
+ mull 132(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %edi, %eax
+ mull 128(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ addl %ecx, %ebp
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %edx
+ adcl 72(%esp), %esi # 4-byte Folded Reload
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 100(%esp), %ecx # 4-byte Folded Reload
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl 108(%esp), %ebx # 4-byte Reload
+ adcl 92(%esp), %ebx # 4-byte Folded Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl 60(%esp), %edx # 4-byte Folded Reload
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 104(%esp) # 4-byte Spill
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 108(%esp) # 4-byte Spill
+ adcl 84(%esp), %ebp # 4-byte Folded Reload
+ adcl 88(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 112(%esp) # 4-byte Spill
+ movl %edx, %eax
+ subl 148(%esp), %eax # 4-byte Folded Reload
+ sbbl 128(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 128(%esp) # 4-byte Spill
+ sbbl 132(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ movl %edx, %esi
+ sbbl 136(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 136(%esp) # 4-byte Spill
+ sbbl 140(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 148(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ movl %ebp, %ecx
+ movl %ebx, %ebp
+ sbbl 144(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, %ebx
+ sbbl 124(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, %edi
+ sarl $31, %edi
+ testl %edi, %edi
+ js .LBB99_2
+# BB#1:
+ movl %eax, %esi
+.LBB99_2:
+ movl 172(%esp), %edx
+ movl %esi, (%edx)
+ movl 104(%esp), %eax # 4-byte Reload
+ js .LBB99_4
+# BB#3:
+ movl 128(%esp), %eax # 4-byte Reload
+.LBB99_4:
+ movl %eax, 4(%edx)
+ movl %ecx, %eax
+ movl 116(%esp), %ecx # 4-byte Reload
+ js .LBB99_6
+# BB#5:
+ movl 132(%esp), %ecx # 4-byte Reload
+.LBB99_6:
+ movl %ecx, 8(%edx)
+ movl 108(%esp), %esi # 4-byte Reload
+ movl 120(%esp), %ecx # 4-byte Reload
+ js .LBB99_8
+# BB#7:
+ movl 136(%esp), %ecx # 4-byte Reload
+.LBB99_8:
+ movl %ecx, 12(%edx)
+ js .LBB99_10
+# BB#9:
+ movl 148(%esp), %esi # 4-byte Reload
+.LBB99_10:
+ movl %esi, 16(%edx)
+ js .LBB99_12
+# BB#11:
+ movl %ebp, %eax
+.LBB99_12:
+ movl %eax, 20(%edx)
+ js .LBB99_14
+# BB#13:
+ movl %ebx, 112(%esp) # 4-byte Spill
+.LBB99_14:
+ movl 112(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%edx)
+ addl $152, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end99:
+ .size mcl_fp_montNF7L, .Lfunc_end99-mcl_fp_montNF7L
+
+ .globl mcl_fp_montRed7L
+ .align 16, 0x90
+ .type mcl_fp_montRed7L,@function
+mcl_fp_montRed7L: # @mcl_fp_montRed7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $124, %esp
+ movl 152(%esp), %eax
+ movl -4(%eax), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl (%eax), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx
+ movl (%ecx), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ imull %edx, %ecx
+ movl 24(%eax), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 20(%eax), %edx
+ movl %edx, 120(%esp) # 4-byte Spill
+ movl 16(%eax), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 12(%eax), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 8(%eax), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 4(%eax), %ebx
+ movl %ebx, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %edi
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebp
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %esi
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull %ebx
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ addl %ebx, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ adcl 80(%esp), %ebp # 4-byte Folded Reload
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 84(%esp), %ebx # 4-byte Folded Reload
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl %edi, %esi
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ addl 116(%esp), %eax # 4-byte Folded Reload
+ movl 148(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 4(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 8(%ecx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 12(%ecx), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 16(%ecx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ adcl 20(%ecx), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ adcl 24(%ecx), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 28(%ecx), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl 48(%ecx), %edi
+ movl 44(%ecx), %edx
+ movl 40(%ecx), %ebx
+ movl 36(%ecx), %ebp
+ movl 32(%ecx), %eax
+ adcl $0, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ adcl $0, %ebx
+ movl %ebx, 68(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ imull 88(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebp
+ movl %ecx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, %ebx
+ movl %ecx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %eax, %ecx
+ addl %ebx, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl %ebp, %edi
+ movl %esi, %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl 16(%esp), %ebx # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 72(%esp), %ecx # 4-byte Folded Reload
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl 12(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ imull 88(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, %ebx
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, %esi
+ movl %edi, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %eax, %edi
+ addl %esi, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl %ebx, %ebp
+ movl %ebp, %eax
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl 60(%esp), %ebx # 4-byte Reload
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 28(%esp), %edi # 4-byte Folded Reload
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 64(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl %edi, %esi
+ imull 88(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, %ecx
+ movl %esi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, %ebp
+ movl %esi, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %eax, %edi
+ addl %ebp, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl %ecx, %ebx
+ movl %ebx, %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl 40(%esp), %ebx # 4-byte Reload
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ addl 32(%esp), %edi # 4-byte Folded Reload
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl %edi, %ebp
+ imull 88(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ecx
+ movl %ebp, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, %ebx
+ movl %ebp, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %eax, %ebp
+ addl %ebx, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl %ecx, %edi
+ movl %edi, %ecx
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl $0, %edx
+ addl 28(%esp), %ebp # 4-byte Folded Reload
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl %ebp, %edi
+ imull 88(%esp), %edi # 4-byte Folded Reload
+ movl %edi, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, %ecx
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ movl %eax, %ebx
+ movl %edi, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %eax, %ebp
+ addl %ebx, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, %eax
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ addl 32(%esp), %ebp # 4-byte Folded Reload
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 52(%esp) # 4-byte Spill
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 80(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ imull %ebp, %ecx
+ movl %ecx, %eax
+ mull 92(%esp) # 4-byte Folded Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 120(%esp) # 4-byte Folded Reload
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 104(%esp) # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 108(%esp) # 4-byte Folded Reload
+ movl %edx, %ebp
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 100(%esp) # 4-byte Folded Reload
+ movl %edx, %ebx
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 112(%esp) # 4-byte Folded Reload
+ movl %edx, %edi
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ mull 96(%esp) # 4-byte Folded Reload
+ movl %edx, %esi
+ addl %edi, %eax
+ movl %eax, %edi
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 80(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 80(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl 28(%esp), %ebx # 4-byte Reload
+ addl 44(%esp), %ebx # 4-byte Folded Reload
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ebx # 4-byte Reload
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 80(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %edx, %eax
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 116(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ movl %edi, %edx
+ movl %edx, %ecx
+ subl 112(%esp), %ecx # 4-byte Folded Reload
+ sbbl 96(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ sbbl 100(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 96(%esp) # 4-byte Spill
+ sbbl 108(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ sbbl 104(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl %eax, %esi
+ movl %esi, %ebp
+ sbbl 120(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ movl %esi, %eax
+ sbbl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 120(%esp) # 4-byte Spill
+ sbbl $0, %ebx
+ andl $1, %ebx
+ movl %ebx, 116(%esp) # 4-byte Spill
+ jne .LBB100_2
+# BB#1:
+ movl %ecx, %edx
+.LBB100_2:
+ movl 144(%esp), %edi
+ movl %edx, (%edi)
+ movl 116(%esp), %eax # 4-byte Reload
+ testb %al, %al
+ movl 64(%esp), %eax # 4-byte Reload
+ jne .LBB100_4
+# BB#3:
+ movl 84(%esp), %eax # 4-byte Reload
+.LBB100_4:
+ movl %eax, 4(%edi)
+ movl 80(%esp), %eax # 4-byte Reload
+ jne .LBB100_6
+# BB#5:
+ movl 96(%esp), %eax # 4-byte Reload
+.LBB100_6:
+ movl %eax, 8(%edi)
+ movl 88(%esp), %eax # 4-byte Reload
+ movl 68(%esp), %ecx # 4-byte Reload
+ jne .LBB100_8
+# BB#7:
+ movl 100(%esp), %ecx # 4-byte Reload
+.LBB100_8:
+ movl %ecx, 12(%edi)
+ jne .LBB100_10
+# BB#9:
+ movl 108(%esp), %eax # 4-byte Reload
+.LBB100_10:
+ movl %eax, 16(%edi)
+ jne .LBB100_12
+# BB#11:
+ movl 112(%esp), %ebp # 4-byte Reload
+.LBB100_12:
+ movl %ebp, 20(%edi)
+ jne .LBB100_14
+# BB#13:
+ movl 120(%esp), %esi # 4-byte Reload
+.LBB100_14:
+ movl %esi, 24(%edi)
+ addl $124, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end100:
+ .size mcl_fp_montRed7L, .Lfunc_end100-mcl_fp_montRed7L
+
+ .globl mcl_fp_addPre7L
+ .align 16, 0x90
+ .type mcl_fp_addPre7L,@function
+#-----------------------------------------------------------------------
+# uint32_t mcl_fp_addPre7L(uint32_t *z, const uint32_t *x, const uint32_t *y)
+# i386 cdecl; after the 3 register pushes the args sit at 16/20/24(%esp):
+#   16(%esp) = z (output, 7 limbs), 20(%esp) = x, 24(%esp) = y.
+# Raw 7-limb (224-bit) addition z = x + y with NO modular reduction
+# ("Pre" = plain precursor add).  Returns the final carry (0 or 1) in %eax.
+#-----------------------------------------------------------------------
+mcl_fp_addPre7L: # @mcl_fp_addPre7L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 20(%esp), %esi
+# limb 0 starts the carry chain (addl); every later limb uses adcl so the
+# carry propagates uninterrupted through all 7 limbs.
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 8(%eax), %edi
+ adcl 8(%esi), %edi
+ movl 16(%esp), %ebx
+ movl %ecx, (%ebx)
+ movl 12(%esi), %ecx
+ movl %edx, 4(%ebx)
+ movl 16(%esi), %edx
+ adcl 12(%eax), %ecx
+ adcl 16(%eax), %edx
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %ecx, 12(%ebx)
+ movl 20(%esi), %ecx
+ adcl %edi, %ecx
+ movl %edx, 16(%ebx)
+ movl %ecx, 20(%ebx)
+ movl 24(%eax), %eax
+ movl 24(%esi), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 24(%ebx)
+# materialize the carry flag as the return value: sbbl gives 0 or -1,
+# andl $1 turns that into 0 or 1 in %eax.
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end101:
+ .size mcl_fp_addPre7L, .Lfunc_end101-mcl_fp_addPre7L
+
+ .globl mcl_fp_subPre7L
+ .align 16, 0x90
+ .type mcl_fp_subPre7L,@function
+#-----------------------------------------------------------------------
+# uint32_t mcl_fp_subPre7L(uint32_t *z, const uint32_t *x, const uint32_t *y)
+# i386 cdecl; after the 4 register pushes the args sit at 20/24/28(%esp):
+#   20(%esp) = z (output, 7 limbs), 24(%esp) = x (minuend), 28(%esp) = y.
+# Raw 7-limb (224-bit) subtraction z = x - y with NO modular correction.
+# Returns the final borrow (0 or 1) in %eax.
+#-----------------------------------------------------------------------
+mcl_fp_subPre7L: # @mcl_fp_subPre7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+# %eax pre-zeroed here so the trailing sbbl $0 can fold the borrow into it;
+# xorl does not disturb the chain because subl below restarts the flags.
+ xorl %eax, %eax
+ movl 28(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %esi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edi), %ebx
+ movl 20(%esp), %ebp
+ movl %edx, (%ebp)
+ movl 12(%ecx), %edx
+ sbbl 12(%edi), %edx
+ movl %esi, 4(%ebp)
+ movl 16(%ecx), %esi
+ sbbl 16(%edi), %esi
+ movl %ebx, 8(%ebp)
+ movl 20(%edi), %ebx
+ movl %edx, 12(%ebp)
+ movl 20(%ecx), %edx
+ sbbl %ebx, %edx
+ movl %esi, 16(%ebp)
+ movl %edx, 20(%ebp)
+ movl 24(%edi), %edx
+ movl 24(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 24(%ebp)
+# convert the final borrow into the 0/1 return value.
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end102:
+ .size mcl_fp_subPre7L, .Lfunc_end102-mcl_fp_subPre7L
+
+ .globl mcl_fp_shr1_7L
+ .align 16, 0x90
+ .type mcl_fp_shr1_7L,@function
+#-----------------------------------------------------------------------
+# void mcl_fp_shr1_7L(uint32_t *z, const uint32_t *x)
+# i386 cdecl; after one push the args sit at 8(%esp) (z) and 12(%esp) (x).
+# Logical right shift of a 7-limb (224-bit) value by one bit: z = x >> 1.
+# Each shrdl pulls the low bit of the next-higher limb into the current
+# limb's vacated top bit; the last limb is a plain zero-fill shrl.
+#-----------------------------------------------------------------------
+mcl_fp_shr1_7L: # @mcl_fp_shr1_7L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %ecx # %ecx = x (source limbs)
+ movl 8(%esp), %esi # %esi = z (destination limbs)
+ movl (%ecx), %edx
+ movl 4(%ecx), %eax
+ shrdl $1, %eax, %edx # z[0] = (x[1]:x[0]) >> 1
+ movl %edx, (%esi)
+ movl 8(%ecx), %edx
+ shrdl $1, %edx, %eax # z[1] = (x[2]:x[1]) >> 1
+ movl %eax, 4(%esi)
+ movl 12(%ecx), %eax
+ shrdl $1, %eax, %edx # z[2] = (x[3]:x[2]) >> 1
+ movl %edx, 8(%esi)
+ movl 16(%ecx), %edx
+ shrdl $1, %edx, %eax # z[3] = (x[4]:x[3]) >> 1
+ movl %eax, 12(%esi)
+ movl 20(%ecx), %eax
+ shrdl $1, %eax, %edx # z[4] = (x[5]:x[4]) >> 1
+ movl %edx, 16(%esi)
+ movl 24(%ecx), %edx
+ shrdl $1, %edx, %eax # z[5] = (x[6]:x[5]) >> 1
+ movl %eax, 20(%esi)
+ shrl %edx # z[6] = x[6] >> 1 (zero fill)
+ movl %edx, 24(%esi)
+ popl %esi
+ retl
+.Lfunc_end103:
+ .size mcl_fp_shr1_7L, .Lfunc_end103-mcl_fp_shr1_7L
+
+ .globl mcl_fp_add7L
+ .align 16, 0x90
+ .type mcl_fp_add7L,@function
+#-----------------------------------------------------------------------
+# void mcl_fp_add7L(uint32_t *z, const uint32_t *x, const uint32_t *y,
+#                   const uint32_t *p)
+# i386 cdecl; after the prologue the args sit at 40/44/48/52(%esp).
+# Computes s = x + y (7 limbs), stores s to z, then forms s - p in spill
+# slots; when that subtraction does not underflow (no carry out of the add
+# and no borrow from the subtract) the reduced value overwrites z.
+# NOTE(review): presumably p is the field modulus (mcl fp_add contract,
+# i.e. z = (x + y) mod p for inputs already < p) -- confirm against caller.
+#-----------------------------------------------------------------------
+mcl_fp_add7L: # @mcl_fp_add7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+# ---- phase 1: s = x + y, streaming the limbs out to z as they are made
+ movl 48(%esp), %ebp
+ movl (%ebp), %eax
+ movl 4(%ebp), %edi
+ movl 44(%esp), %ecx
+ addl (%ecx), %eax
+ adcl 4(%ecx), %edi
+ movl 8(%ebp), %esi
+ adcl 8(%ecx), %esi
+ movl 12(%ecx), %edx
+ movl 16(%ecx), %ebx
+ adcl 12(%ebp), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 16(%ebp), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ movl 20(%ecx), %ebp
+ adcl 20(%ebx), %ebp
+ movl 24(%ecx), %edx
+ adcl 24(%ebx), %edx
+ movl 40(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edi, 4(%ecx)
+ movl %esi, 8(%ecx)
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%ecx)
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%ecx)
+ movl %ebp, 20(%ecx)
+ movl %edx, 24(%ecx)
+# capture the add's carry-out in %ebx (0 or -1, masked to 0/1)
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+# ---- phase 2: t = s - p into spill slots / registers
+ movl 52(%esp), %ecx
+ subl (%ecx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl 52(%esp), %eax
+ sbbl 4(%eax), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %eax, %edi
+ sbbl 8(%edi), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ sbbl 12(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ sbbl 16(%edi), %ecx
+ movl %ecx, %esi
+ sbbl 20(%edi), %ebp
+ sbbl 24(%edi), %edx
+# fold the subtract's borrow into the saved carry; nonzero => s < p (or the
+# add carried), so keep the unreduced s already stored in z.
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB104_2
+# BB#1: # %nocarry
+# ---- phase 3: s >= p, so overwrite z with the reduced value t
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl 40(%esp), %eax
+ movl %eax, %ebx
+ movl %ecx, (%ebx)
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%ebx)
+ movl (%esp), %eax # 4-byte Reload
+ movl %eax, 8(%ebx)
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebx)
+ movl %esi, 16(%ebx)
+ movl %ebp, 20(%ebx)
+ movl %edx, 24(%ebx)
+.LBB104_2: # %carry
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end104:
+ .size mcl_fp_add7L, .Lfunc_end104-mcl_fp_add7L
+
+ .globl mcl_fp_addNF7L
+ .align 16, 0x90
+ .type mcl_fp_addNF7L,@function
+#-----------------------------------------------------------------------
+# void mcl_fp_addNF7L(uint32_t *z, const uint32_t *x, const uint32_t *y,
+#                     const uint32_t *p)
+# i386 cdecl; after the prologue the args sit at 72/76/80/84(%esp).
+# Computes s = x + y, then t = s - p; the sign of the final borrow
+# (sarl $31 on the top limb of t) selects the result limb-by-limb:
+# t when t >= 0, otherwise the unreduced s.  Branches (js) do the select.
+# NOTE(review): "NF" presumably follows mcl's no-final-carry naming --
+# confirm against the library headers.
+#-----------------------------------------------------------------------
+mcl_fp_addNF7L: # @mcl_fp_addNF7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $52, %esp
+# ---- s = x + y, all limbs kept in spill slots
+ movl 80(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 76(%esp), %esi
+ addl (%esi), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 4(%esi), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 20(%eax), %ebx
+ movl 16(%eax), %edi
+ movl 12(%eax), %ebp
+ movl 8(%eax), %ecx
+ adcl 8(%esi), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 12(%esi), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl 16(%esi), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 20(%esi), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 24(%esi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+# ---- t = s - p, spilled alongside s
+ movl 84(%esp), %eax
+ movl 44(%esp), %esi # 4-byte Reload
+ subl (%eax), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ sbbl 4(%eax), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 8(%eax), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 12(%eax), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ sbbl 16(%eax), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ sbbl 20(%eax), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ sbbl 24(%eax), %edi
+# broadcast the top limb's sign: %ecx = 0 if t >= 0, -1 if t < 0
+ movl %edi, %ecx
+ sarl $31, %ecx
+ testl %ecx, %ecx
+ js .LBB105_2
+# BB#1:
+ movl (%esp), %esi # 4-byte Reload
+.LBB105_2:
+# ---- per-limb select between s (negative case) and t, streaming to z
+ movl 72(%esp), %ecx
+ movl %esi, (%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ js .LBB105_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB105_4:
+ movl %eax, 4(%ecx)
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl 36(%esp), %edx # 4-byte Reload
+ movl 32(%esp), %esi # 4-byte Reload
+ movl 24(%esp), %ebx # 4-byte Reload
+ js .LBB105_6
+# BB#5:
+ movl 8(%esp), %ebx # 4-byte Reload
+.LBB105_6:
+ movl 72(%esp), %eax
+ movl %ebx, 8(%eax)
+ movl %eax, %ebx
+ js .LBB105_8
+# BB#7:
+ movl 16(%esp), %esi # 4-byte Reload
+.LBB105_8:
+ movl %esi, 12(%ebx)
+ js .LBB105_10
+# BB#9:
+ movl 20(%esp), %edx # 4-byte Reload
+.LBB105_10:
+ movl %edx, 16(%ebx)
+ js .LBB105_12
+# BB#11:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB105_12:
+ movl %ecx, 20(%ebx)
+ js .LBB105_14
+# BB#13:
+ movl %edi, %ebp
+.LBB105_14:
+ movl %ebp, 24(%ebx)
+ addl $52, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end105:
+ .size mcl_fp_addNF7L, .Lfunc_end105-mcl_fp_addNF7L
+
+ .globl mcl_fp_sub7L
+ .align 16, 0x90
+ .type mcl_fp_sub7L,@function
+#-----------------------------------------------------------------------
+# void mcl_fp_sub7L(uint32_t *z, const uint32_t *x, const uint32_t *y,
+#                   const uint32_t *p)
+# i386 cdecl; after the prologue the args sit at 44/48/52/56(%esp).
+# Computes d = x - y, stores d to z unconditionally, then -- if the
+# subtraction borrowed (x < y) -- adds p back into z in place.
+# NOTE(review): presumably p is the field modulus, giving
+# z = (x - y) mod p for inputs < p -- confirm against caller.
+#-----------------------------------------------------------------------
+mcl_fp_sub7L: # @mcl_fp_sub7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $24, %esp
+# ---- phase 1: d = x - y into spill slots, borrow captured in %ebx
+ movl 48(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %ecx
+ xorl %ebx, %ebx
+ movl 52(%esp), %esi
+ subl (%esi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ sbbl 4(%esi), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 8(%edi), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 12(%edi), %ecx
+ sbbl 12(%esi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 16(%edi), %eax
+ sbbl 16(%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 20(%edi), %ebp
+ sbbl 20(%esi), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 24(%edi), %edi
+ sbbl 24(%esi), %edi
+ sbbl $0, %ebx
+ testb $1, %bl
+# ---- phase 2: store the raw difference to z
+ movl 44(%esp), %ebx
+ movl 16(%esp), %esi # 4-byte Reload
+ movl %esi, (%ebx)
+ movl 20(%esp), %esi # 4-byte Reload
+ movl %esi, 4(%ebx)
+ movl %edx, 8(%ebx)
+ movl %ecx, 12(%ebx)
+ movl %eax, 16(%ebx)
+ movl %ebp, 20(%ebx)
+ movl %edi, 24(%ebx)
+ je .LBB106_2
+# BB#1: # %carry
+# ---- phase 3: x < y, so correct by adding p: z = d + p
+ movl 56(%esp), %ebp
+ movl 16(%esp), %ecx # 4-byte Reload
+ addl (%ebp), %ecx
+ movl %ecx, (%ebx)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 4(%ebp), %edx
+ movl %edx, 4(%ebx)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 8(%ebp), %ecx
+ movl 12(%ebp), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebx)
+ movl 16(%ebp), %ecx
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl %ecx, 16(%ebx)
+ movl 20(%ebp), %eax
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 24(%ebp), %eax
+ adcl %edi, %eax
+ movl %eax, 24(%ebx)
+.LBB106_2: # %nocarry
+ addl $24, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end106:
+ .size mcl_fp_sub7L, .Lfunc_end106-mcl_fp_sub7L
+
+ .globl mcl_fp_subNF7L
+ .align 16, 0x90
+ .type mcl_fp_subNF7L,@function
+#-----------------------------------------------------------------------
+# void mcl_fp_subNF7L(uint32_t *z, const uint32_t *x, const uint32_t *y,
+#                     const uint32_t *p)
+# i386 cdecl; after the prologue the args sit at 52/56/60/64(%esp).
+# Branchless variant: d = x - y; mask = sign-extension of d's top limb
+# (all-ones when the subtraction went negative, else zero); then
+# z = d + (p & mask), i.e. p is added back only when x < y.
+#-----------------------------------------------------------------------
+mcl_fp_subNF7L: # @mcl_fp_subNF7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+# ---- d = x - y into spill slots
+ movl 56(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %edx
+ movl 60(%esp), %ecx
+ subl (%ecx), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ sbbl 4(%ecx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 24(%eax), %edx
+ movl 20(%eax), %esi
+ movl 16(%eax), %edi
+ movl 12(%eax), %ebx
+ movl 8(%eax), %eax
+ sbbl 8(%ecx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 12(%ecx), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ sbbl 16(%ecx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ sbbl 20(%ecx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ sbbl 24(%ecx), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+# ---- build the all-ones/all-zero mask from the top limb's sign
+ movl %edx, %ecx
+ sarl $31, %ecx
+ movl %ecx, %eax
+# shldl refills %eax from %edx's sign bit; since %ecx is already the
+# sign-broadcast of %edx this leaves %eax equal to the same mask.
+ shldl $1, %edx, %eax
+# ---- mask each limb of p, then add the masked p onto d, streaming to z
+ movl 64(%esp), %edx
+ andl (%edx), %eax
+ movl 24(%edx), %esi
+ andl %ecx, %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl 20(%edx), %ebx
+ andl %ecx, %ebx
+ movl 16(%edx), %edi
+ andl %ecx, %edi
+ movl 12(%edx), %esi
+ andl %ecx, %esi
+ movl 64(%esp), %edx
+ movl 8(%edx), %edx
+ andl %ecx, %edx
+ movl 64(%esp), %ebp
+ andl 4(%ebp), %ecx
+ addl 20(%esp), %eax # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 52(%esp), %ebp
+ movl %eax, (%ebp)
+ adcl 4(%esp), %edx # 4-byte Folded Reload
+ movl %ebp, %eax
+ movl %ecx, 4(%eax)
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 8(%eax)
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 12(%eax)
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, 16(%eax)
+ movl %ebx, 20(%eax)
+ movl (%esp), %ecx # 4-byte Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%eax)
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end107:
+ .size mcl_fp_subNF7L, .Lfunc_end107-mcl_fp_subNF7L
+
+ .globl mcl_fpDbl_add7L
+ .align 16, 0x90
+ .type mcl_fpDbl_add7L,@function
+#-----------------------------------------------------------------------
+# void mcl_fpDbl_add7L(uint32_t *z, const uint32_t *x, const uint32_t *y,
+#                      const uint32_t *p)
+# i386 cdecl; after the prologue the args sit at 64/68/72/76(%esp).
+# Double-width (14-limb, 448-bit) addition: the low 7 limbs of x + y are
+# stored to z[0..6] unreduced; the high 7 limbs then get p subtracted and
+# a branch-based select keeps either the reduced or unreduced upper half
+# depending on whether that subtraction (plus carry-out) underflowed.
+#-----------------------------------------------------------------------
+mcl_fpDbl_add7L: # @mcl_fpDbl_add7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $44, %esp
+# ---- single 14-limb carry chain; low limbs stream straight to z
+ movl 72(%esp), %esi
+ movl 68(%esp), %edx
+ movl 12(%edx), %edi
+ movl 16(%edx), %ecx
+ movl 8(%esi), %eax
+ movl (%esi), %ebx
+ addl (%edx), %ebx
+ movl 64(%esp), %ebp
+ movl %ebx, (%ebp)
+ movl 4(%esi), %ebx
+ adcl 4(%edx), %ebx
+ adcl 8(%edx), %eax
+ adcl 12(%esi), %edi
+ adcl 16(%esi), %ecx
+ movl %ebx, 4(%ebp)
+ movl %esi, %ebx
+ movl 36(%ebx), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl %eax, 8(%ebp)
+ movl 20(%ebx), %eax
+ movl %edi, 12(%ebp)
+ movl 20(%edx), %edi
+ adcl %eax, %edi
+ movl 24(%ebx), %eax
+ movl %ecx, 16(%ebp)
+ movl 24(%edx), %ecx
+ adcl %eax, %ecx
+ movl 28(%ebx), %eax
+ movl %edi, 20(%ebp)
+ movl 28(%edx), %edi
+ adcl %eax, %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 32(%ebx), %eax
+ movl %ecx, 24(%ebp)
+ movl 32(%edx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%edx), %esi
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl 40(%ebx), %ecx
+ movl 40(%edx), %eax
+ adcl %ecx, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%ebx), %ebp
+ movl 44(%edx), %ecx
+ adcl %ebp, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 48(%ebx), %ebp
+ movl %ebx, %eax
+ movl 48(%edx), %ebx
+ adcl %ebp, %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 52(%eax), %eax
+ movl 52(%edx), %ebp
+ adcl %eax, %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+# carry-out of the 14-limb add, masked to 0/1 in %edx
+ sbbl %edx, %edx
+ andl $1, %edx
+# ---- subtract p from the upper 7 limbs (z[7..13] candidates)
+ movl 76(%esp), %eax
+ subl (%eax), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ sbbl 4(%eax), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ movl 76(%esp), %edi
+ sbbl 8(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ sbbl 12(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl 16(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ sbbl 20(%edi), %ebx
+ sbbl 24(%edi), %ebp
+ sbbl $0, %edx
+ andl $1, %edx
+# ---- select reduced vs unreduced upper half, then store z[7..13]
+ jne .LBB108_2
+# BB#1:
+ movl %ebp, 32(%esp) # 4-byte Spill
+.LBB108_2:
+ testb %dl, %dl
+ movl 20(%esp), %ecx # 4-byte Reload
+ jne .LBB108_4
+# BB#3:
+ movl (%esp), %esi # 4-byte Reload
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB108_4:
+ movl 64(%esp), %eax
+ movl %ecx, 28(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl %esi, 36(%eax)
+ movl 24(%esp), %edx # 4-byte Reload
+ movl 36(%esp), %ecx # 4-byte Reload
+ jne .LBB108_6
+# BB#5:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB108_6:
+ movl %ecx, 40(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ jne .LBB108_8
+# BB#7:
+ movl 16(%esp), %edx # 4-byte Reload
+.LBB108_8:
+ movl %edx, 44(%eax)
+ jne .LBB108_10
+# BB#9:
+ movl %ebx, %ecx
+.LBB108_10:
+ movl %ecx, 48(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ addl $44, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end108:
+ .size mcl_fpDbl_add7L, .Lfunc_end108-mcl_fpDbl_add7L
+
+ .globl mcl_fpDbl_sub7L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub7L,@function
+#-----------------------------------------------------------------------
+# void mcl_fpDbl_sub7L(uint32_t *z, const uint32_t *x, const uint32_t *y,
+#                      const uint32_t *p)
+# i386 cdecl; after the prologue the args sit at 52/56/60/64(%esp).
+# Double-width (14-limb, 448-bit) subtraction: the low 7 limbs of x - y
+# are stored to z[0..6] raw; the upper 7 limbs then get either p or zero
+# added back (selected limb-by-limb via branches on the final borrow),
+# so z[7..13] is corrected only when the full subtraction underflowed.
+#-----------------------------------------------------------------------
+mcl_fpDbl_sub7L: # @mcl_fpDbl_sub7L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+# ---- single 14-limb borrow chain; low limbs stream straight to z
+ movl 56(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %edx
+ movl 60(%esp), %edi
+ subl (%edi), %eax
+ sbbl 4(%edi), %edx
+ movl 8(%esi), %ebx
+ sbbl 8(%edi), %ebx
+ movl 52(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%esi), %edx
+ sbbl 16(%edi), %edx
+ movl %ebx, 8(%ecx)
+ movl 20(%edi), %ebx
+ movl %eax, 12(%ecx)
+ movl 20(%esi), %eax
+ sbbl %ebx, %eax
+ movl 24(%edi), %ebx
+ movl %edx, 16(%ecx)
+ movl 24(%esi), %edx
+ sbbl %ebx, %edx
+ movl 28(%edi), %ebx
+ movl %eax, 20(%ecx)
+ movl 28(%esi), %eax
+ sbbl %ebx, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 32(%edi), %eax
+ movl %edx, 24(%ecx)
+ movl 32(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 36(%edi), %eax
+ movl 36(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 40(%edi), %eax
+ movl 40(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 44(%edi), %eax
+ movl 44(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 48(%edi), %eax
+ movl 48(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 52(%edi), %eax
+ movl 52(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+# final borrow, masked to 0/1 in %eax
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+# ---- pick each correction limb: p[i] if we borrowed, else 0
+ movl 64(%esp), %esi
+ jne .LBB109_1
+# BB#2:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB109_3
+.LBB109_1:
+ movl 24(%esi), %edx
+ movl %edx, (%esp) # 4-byte Spill
+.LBB109_3:
+ testb %al, %al
+ jne .LBB109_4
+# BB#5:
+ movl $0, %edi
+ movl $0, %eax
+ jmp .LBB109_6
+.LBB109_4:
+ movl (%esi), %eax
+ movl 4(%esi), %edi
+.LBB109_6:
+ jne .LBB109_7
+# BB#8:
+ movl $0, %ebx
+ jmp .LBB109_9
+.LBB109_7:
+ movl 20(%esi), %ebx
+.LBB109_9:
+ jne .LBB109_10
+# BB#11:
+ movl $0, %ebp
+ jmp .LBB109_12
+.LBB109_10:
+ movl 16(%esi), %ebp
+.LBB109_12:
+ jne .LBB109_13
+# BB#14:
+ movl $0, %edx
+ jmp .LBB109_15
+.LBB109_13:
+ movl 12(%esi), %edx
+.LBB109_15:
+ jne .LBB109_16
+# BB#17:
+ xorl %esi, %esi
+ jmp .LBB109_18
+.LBB109_16:
+ movl 8(%esi), %esi
+.LBB109_18:
+# ---- add the (possibly zero) correction onto the upper diff, store z[7..13]
+ addl 12(%esp), %eax # 4-byte Folded Reload
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 32(%ecx)
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 36(%ecx)
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %edx, 40(%ecx)
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 44(%ecx)
+ movl %ebx, 48(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ecx)
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end109:
+ .size mcl_fpDbl_sub7L, .Lfunc_end109-mcl_fpDbl_sub7L
+
+ .align 16, 0x90
+ .type .LmulPv256x32,@function
+#-----------------------------------------------------------------------
+# Internal helper (file-local, custom register convention -- NOT cdecl):
+#   %ecx      = result pointer (9 limbs written)
+#   %edx      = source pointer (8 limbs, 256 bits)
+#   68(%esp)  = 32-bit multiplier (caller's single stack argument,
+#               seen at this offset after the 4 pushes + subl $48)
+# Computes the 288-bit product result = source * multiplier via eight
+# 32x32->64 mull's whose high halves are chained with adcl.
+# Returns the result pointer in %eax.  Clobbers flags and all GPRs it
+# saves/restores around the body.
+#-----------------------------------------------------------------------
+.LmulPv256x32: # @mulPv256x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $48, %esp
+ movl %edx, %esi
+ movl 68(%esp), %ebx
+# ---- eight partial products src[i]*mul; each mull leaves lo in %eax,
+# hi in %edx, spilled (or kept in regs for the low limbs)
+ movl %ebx, %eax
+ mull 28(%esi)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 24(%esi)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 20(%esi)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 16(%esi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 12(%esi)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 8(%esi)
+ movl %edx, %ebp
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 4(%esi)
+ movl %edx, %edi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull (%esi)
+# ---- combine: result[i] = lo[i] + hi[i-1] with carry propagation
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%ecx)
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+# top (9th) limb: last high word plus any final carry
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 32(%ecx)
+ movl %ecx, %eax
+ addl $48, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end110:
+ .size .LmulPv256x32, .Lfunc_end110-.LmulPv256x32
+
+ .globl mcl_fp_mulUnitPre8L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre8L,@function
+#-----------------------------------------------------------------------
+# void mcl_fp_mulUnitPre8L(uint32_t *z, const uint32_t *x, uint32_t y)
+# i386 cdecl; after the prologue the args sit at 80/84/88(%esp).
+# z[0..8] = x[0..7] * y: delegates the 256x32-bit multiply to the local
+# .LmulPv256x32 helper (result ptr in %ecx, source in %edx, multiplier on
+# the stack), using a 36-byte stack temporary, then copies the 9 product
+# limbs out to z.  The call/pop sequence loads %ebx with the GOT address
+# for position-independent code.
+#-----------------------------------------------------------------------
+mcl_fp_mulUnitPre8L: # @mcl_fp_mulUnitPre8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+# PIC idiom: call next instruction, pop return address, add GOT offset
+ calll .L111$pb
+.L111$pb:
+ popl %ebx
+.Ltmp2:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp2-.L111$pb), %ebx
+# set up the helper's custom convention and call it
+ movl 88(%esp), %eax
+ movl %eax, (%esp)
+ leal 24(%esp), %ecx
+ movl 84(%esp), %edx
+ calll .LmulPv256x32
+# ---- copy the 9-limb product from the stack temporary to z
+ movl 56(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 48(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %esi
+ movl 40(%esp), %edi
+ movl 36(%esp), %ebx
+ movl 32(%esp), %ebp
+ movl 24(%esp), %edx
+ movl 28(%esp), %ecx
+ movl 80(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %ebp, 8(%eax)
+ movl %ebx, 12(%eax)
+ movl %edi, 16(%eax)
+ movl %esi, 20(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end111:
+ .size mcl_fp_mulUnitPre8L, .Lfunc_end111-mcl_fp_mulUnitPre8L
+
+ .globl mcl_fpDbl_mulPre8L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre8L,@function
+# void mcl_fpDbl_mulPre8L(uint32_t z[16], const uint32_t x[8], const uint32_t y[8])
+# i386 cdecl, PIC.  Full 16-limb product z = x * y using one level of
+# Karatsuba-style splitting: three 4-limb multiplies via mcl_fpDbl_mulPre4L
+#   (1) z[0..7]  = xL * yL          (low halves)
+#   (2) z[8..15] = xH * yH          (high halves)
+#   (3) tmp      = (xL+xH)*(yL+yH)  (half-sums, into a stack buffer)
+# followed by the recombination tmp - (1) - (2) folded into z[4..11].
+# Frame args: 8(%ebp)=z, 12(%ebp)=x, 16(%ebp)=y.
+# NOTE(review): carry/overflow flags from the half-sum additions are kept
+# alive across intervening instructions with the seto/lahf save and
+# addb $127,%al / sahf restore idiom — instruction order here is load-bearing.
+mcl_fpDbl_mulPre8L: # @mcl_fpDbl_mulPre8L
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $156, %esp
+ calll .L112$pb
+.L112$pb:
+ popl %ebx
+.Ltmp3:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp3-.L112$pb), %ebx
+ movl %ebx, -96(%ebp) # 4-byte Spill
+# (1) z[0..7] = xL * yL
+ movl 16(%ebp), %esi
+ movl %esi, 8(%esp)
+ movl 12(%ebp), %edi
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre4L@PLT
+# (2) z[8..15] = xH * yH
+ leal 16(%esi), %eax
+ movl %eax, 8(%esp)
+ leal 16(%edi), %eax
+ movl %eax, 4(%esp)
+ movl 8(%ebp), %eax
+ leal 32(%eax), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre4L@PLT
+# Form xL+xH and yL+yH (4 limbs each), capturing the carry-out of each sum
+# via the seto/lahf flag-save trick for use in the recombination below.
+ movl 24(%edi), %esi
+ movl (%edi), %ebx
+ movl 4(%edi), %eax
+ addl 16(%edi), %ebx
+ movl %ebx, -120(%ebp) # 4-byte Spill
+ adcl 20(%edi), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ adcl 8(%edi), %esi
+ movl %esi, -108(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -80(%ebp) # 4-byte Spill
+ movl 16(%ebp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %ecx
+ addl 16(%edi), %eax
+ adcl 20(%edi), %ecx
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ movl 24(%edi), %edx
+ adcl 8(%edi), %edx
+ movl 28(%edi), %ecx
+ adcl 12(%edi), %ecx
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -128(%ebp) # 4-byte Spill
+ jb .LBB112_2
+# BB#1:
+ xorl %esi, %esi
+ xorl %ebx, %ebx
+.LBB112_2:
+ movl %ebx, -112(%ebp) # 4-byte Spill
+ movl %esi, -104(%ebp) # 4-byte Spill
+ movl 12(%ebp), %esi
+ movl 28(%esi), %edi
+ movl -80(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 12(%esi), %edi
+ movl %edi, -116(%ebp) # 4-byte Spill
+ movl %ecx, -84(%ebp) # 4-byte Spill
+ movl %edx, %edi
+ movl -124(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -80(%ebp) # 4-byte Spill
+ movl %eax, -92(%ebp) # 4-byte Spill
+ jb .LBB112_4
+# BB#3:
+ movl $0, -84(%ebp) # 4-byte Folded Spill
+ movl $0, %edi
+ movl $0, -80(%ebp) # 4-byte Folded Spill
+ movl $0, -92(%ebp) # 4-byte Folded Spill
+.LBB112_4:
+ movl %edi, -88(%ebp) # 4-byte Spill
+# Stage the two 4-limb half-sums in stack buffers -60(%ebp) and -76(%ebp)
+# as operands for the third multiply.
+ movl -120(%ebp), %esi # 4-byte Reload
+ movl %esi, -60(%ebp)
+ movl -100(%ebp), %edi # 4-byte Reload
+ movl %edi, -56(%ebp)
+ movl -108(%ebp), %esi # 4-byte Reload
+ movl %esi, -52(%ebp)
+ movl %eax, -76(%ebp)
+ movl %ebx, -72(%ebp)
+ movl %edx, -68(%ebp)
+ movl %ecx, -64(%ebp)
+ sbbl %edx, %edx
+ movl -116(%ebp), %esi # 4-byte Reload
+ movl %esi, -48(%ebp)
+ movl -128(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB112_6
+# BB#5:
+ movl $0, %esi
+ movl $0, %edi
+.LBB112_6:
+# (3) tmp = (xL+xH)*(yL+yH) into -44(%ebp); the carry-out corrections
+# computed above are accumulated around the call.
+ sbbl %eax, %eax
+ leal -76(%ebp), %ecx
+ movl %ecx, 8(%esp)
+ leal -60(%ebp), %ecx
+ movl %ecx, 4(%esp)
+ leal -44(%ebp), %ecx
+ movl %ecx, (%esp)
+ andl %eax, %edx
+ movl %edi, %eax
+ movl -92(%ebp), %edi # 4-byte Reload
+ addl -112(%ebp), %edi # 4-byte Folded Reload
+ adcl %eax, -80(%ebp) # 4-byte Folded Spill
+ movl -104(%ebp), %eax # 4-byte Reload
+ adcl %eax, -88(%ebp) # 4-byte Folded Spill
+ adcl %esi, -84(%ebp) # 4-byte Folded Spill
+ sbbl %esi, %esi
+ andl $1, %esi
+ andl $1, %edx
+ movl %edx, -92(%ebp) # 4-byte Spill
+ movl -96(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre4L@PLT
+ addl -28(%ebp), %edi
+ movl -80(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -80(%ebp) # 4-byte Spill
+ movl -88(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -88(%ebp) # 4-byte Spill
+ movl -84(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -84(%ebp) # 4-byte Spill
+ adcl %esi, -92(%ebp) # 4-byte Folded Spill
+# Recombination: subtract the low product z[0..7] ...
+ movl -44(%ebp), %eax
+ movl 8(%ebp), %esi
+ subl (%esi), %eax
+ movl %eax, -116(%ebp) # 4-byte Spill
+ movl -40(%ebp), %ebx
+ sbbl 4(%esi), %ebx
+ movl -36(%ebp), %eax
+ sbbl 8(%esi), %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ movl -32(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl 16(%esi), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl 20(%esi), %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+ sbbl %eax, -80(%ebp) # 4-byte Folded Spill
+ movl 24(%esi), %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ sbbl %eax, -88(%ebp) # 4-byte Folded Spill
+ movl 28(%esi), %eax
+ movl %eax, -108(%ebp) # 4-byte Spill
+ sbbl %eax, -84(%ebp) # 4-byte Folded Spill
+ sbbl $0, -92(%ebp) # 4-byte Folded Spill
+# ... and the high product z[8..15] from tmp ...
+ movl 32(%esi), %ecx
+ movl %ecx, -132(%ebp) # 4-byte Spill
+ movl -116(%ebp), %eax # 4-byte Reload
+ subl %ecx, %eax
+ movl 36(%esi), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 40(%esi), %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+ sbbl %ecx, -96(%ebp) # 4-byte Folded Spill
+ movl 44(%esi), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ sbbl %ecx, %edx
+ movl 48(%esi), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl 52(%esi), %ecx
+ movl %ecx, -116(%ebp) # 4-byte Spill
+ sbbl %ecx, -80(%ebp) # 4-byte Folded Spill
+ movl 56(%esi), %ecx
+ movl %ecx, -120(%ebp) # 4-byte Spill
+ sbbl %ecx, -88(%ebp) # 4-byte Folded Spill
+ movl 60(%esi), %ecx
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ sbbl %ecx, -84(%ebp) # 4-byte Folded Spill
+ sbbl $0, -92(%ebp) # 4-byte Folded Spill
+# ... then add the corrected middle term into z[4..11] and propagate the
+# carry through z[12..15].
+ addl -100(%ebp), %eax # 4-byte Folded Reload
+ adcl -112(%ebp), %ebx # 4-byte Folded Reload
+ movl %eax, 16(%esi)
+ movl -96(%ebp), %eax # 4-byte Reload
+ adcl -104(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 20(%esi)
+ adcl -108(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 24(%esi)
+ adcl -132(%ebp), %edi # 4-byte Folded Reload
+ movl %edx, 28(%esi)
+ movl -80(%ebp), %eax # 4-byte Reload
+ adcl -136(%ebp), %eax # 4-byte Folded Reload
+ movl %edi, 32(%esi)
+ movl -88(%ebp), %ecx # 4-byte Reload
+ adcl -128(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%esi)
+ movl -84(%ebp), %eax # 4-byte Reload
+ adcl -140(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%esi)
+ movl -92(%ebp), %ecx # 4-byte Reload
+ adcl -144(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 44(%esi)
+ movl %ecx, 48(%esi)
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 52(%esi)
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 56(%esi)
+ movl -124(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 60(%esi)
+ addl $156, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end112:
+ .size mcl_fpDbl_mulPre8L, .Lfunc_end112-mcl_fpDbl_mulPre8L
+
+ .globl mcl_fpDbl_sqrPre8L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre8L,@function
+# void mcl_fpDbl_sqrPre8L(uint32_t z[16], const uint32_t x[8])
+# i386 cdecl, PIC.  Full 16-limb square z = x * x — the squaring variant of
+# mcl_fpDbl_mulPre8L: both operands of every mcl_fpDbl_mulPre4L call are the
+# same, so only one 4-limb half-sum (xL+xH) needs to be formed.
+#   (1) z[0..7]  = xL * xL
+#   (2) z[8..15] = xH * xH
+#   (3) tmp      = (xL+xH)^2  (same buffer passed as both multiplicands)
+# then the recombination tmp - (1) - (2) is folded into z[4..11].
+# Frame args: 8(%ebp)=z, 12(%ebp)=x.
+# NOTE(review): carry flags are preserved across instructions with the
+# seto/lahf save and addb $127,%al / sahf restore idiom; do not reorder.
+mcl_fpDbl_sqrPre8L: # @mcl_fpDbl_sqrPre8L
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $156, %esp
+ calll .L113$pb
+.L113$pb:
+ popl %ebx
+.Ltmp4:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp4-.L113$pb), %ebx
+ movl %ebx, -96(%ebp) # 4-byte Spill
+# (1) z[0..7] = xL * xL
+ movl 12(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %esi
+ movl %esi, (%esp)
+ calll mcl_fpDbl_mulPre4L@PLT
+# (2) z[8..15] = xH * xH
+ leal 16(%edi), %eax
+ movl %eax, 8(%esp)
+ movl %eax, 4(%esp)
+ leal 32(%esi), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre4L@PLT
+# Form xL+xH (4 limbs), keeping the carry-outs alive via flag saves.
+ movl (%edi), %esi
+ movl 4(%edi), %ecx
+ addl 16(%edi), %esi
+ movl %esi, -108(%ebp) # 4-byte Spill
+ adcl 20(%edi), %ecx
+ seto %al
+ lahf
+ movl %eax, %edx
+ addl %esi, %esi
+ movl %esi, -84(%ebp) # 4-byte Spill
+ movl %ecx, %esi
+ adcl %esi, %esi
+ movl %esi, -80(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %esi
+ popl %eax
+ movl %esi, -88(%ebp) # 4-byte Spill
+ movl 24(%edi), %esi
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 8(%edi), %esi
+ movl 28(%edi), %edx
+ adcl 12(%edi), %edx
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -100(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %ebx
+ sbbl %edi, %edi
+ movl %edi, -92(%ebp) # 4-byte Spill
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB113_2
+# BB#1:
+ movl $0, -80(%ebp) # 4-byte Folded Spill
+ movl $0, -84(%ebp) # 4-byte Folded Spill
+.LBB113_2:
+ movl %esi, %ebx
+ movl -88(%ebp), %edi # 4-byte Reload
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ adcl %ebx, %ebx
+ movl %edx, %edi
+ adcl %edi, %edi
+ movl -104(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB113_4
+# BB#3:
+ xorl %edi, %edi
+ xorl %ebx, %ebx
+.LBB113_4:
+ movl %ebx, -88(%ebp) # 4-byte Spill
+# Stage xL+xH in both operand buffers (-60(%ebp) and -76(%ebp)) so the next
+# call computes its square.
+ movl -108(%ebp), %eax # 4-byte Reload
+ movl %eax, -60(%ebp)
+ movl %ecx, -56(%ebp)
+ movl %esi, -52(%ebp)
+ movl %edx, -48(%ebp)
+ movl %eax, -76(%ebp)
+ movl %ecx, -72(%ebp)
+ movl %esi, -68(%ebp)
+ movl %edx, -64(%ebp)
+ movl -100(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB113_5
+# BB#6:
+ movl $0, -100(%ebp) # 4-byte Folded Spill
+ jmp .LBB113_7
+.LBB113_5:
+ shrl $31, %edx
+ movl %edx, -100(%ebp) # 4-byte Spill
+.LBB113_7:
+# (3) tmp = (xL+xH)^2 into -44(%ebp)
+ leal -76(%ebp), %eax
+ movl %eax, 8(%esp)
+ leal -60(%ebp), %eax
+ movl %eax, 4(%esp)
+ leal -44(%ebp), %eax
+ movl %eax, (%esp)
+ movl -92(%ebp), %esi # 4-byte Reload
+ andl $1, %esi
+ movl -96(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre4L@PLT
+ movl -84(%ebp), %eax # 4-byte Reload
+ addl -28(%ebp), %eax
+ movl %eax, -84(%ebp) # 4-byte Spill
+ movl -80(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -80(%ebp) # 4-byte Spill
+ movl -88(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -88(%ebp) # 4-byte Spill
+ adcl -16(%ebp), %edi
+ movl %edi, -92(%ebp) # 4-byte Spill
+ adcl -100(%ebp), %esi # 4-byte Folded Reload
+# Recombination: subtract the low square z[0..7] ...
+ movl -44(%ebp), %eax
+ movl 8(%ebp), %edi
+ subl (%edi), %eax
+ movl %eax, -116(%ebp) # 4-byte Spill
+ movl -40(%ebp), %ebx
+ sbbl 4(%edi), %ebx
+ movl -36(%ebp), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ movl -32(%ebp), %edx
+ sbbl 12(%edi), %edx
+ movl 16(%edi), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ sbbl %eax, -84(%ebp) # 4-byte Folded Spill
+ movl 20(%edi), %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+ sbbl %eax, -80(%ebp) # 4-byte Folded Spill
+ movl 24(%edi), %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ sbbl %eax, -88(%ebp) # 4-byte Folded Spill
+ movl 28(%edi), %eax
+ movl %eax, -108(%ebp) # 4-byte Spill
+ sbbl %eax, -92(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+# ... and the high square z[8..15] from tmp ...
+ movl 32(%edi), %ecx
+ movl %ecx, -132(%ebp) # 4-byte Spill
+ movl -116(%ebp), %eax # 4-byte Reload
+ subl %ecx, %eax
+ movl 36(%edi), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 40(%edi), %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+ sbbl %ecx, -96(%ebp) # 4-byte Folded Spill
+ movl 44(%edi), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ sbbl %ecx, %edx
+ movl 48(%edi), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ sbbl %ecx, -84(%ebp) # 4-byte Folded Spill
+ movl 52(%edi), %ecx
+ movl %ecx, -116(%ebp) # 4-byte Spill
+ sbbl %ecx, -80(%ebp) # 4-byte Folded Spill
+ movl 56(%edi), %ecx
+ movl %ecx, -120(%ebp) # 4-byte Spill
+ sbbl %ecx, -88(%ebp) # 4-byte Folded Spill
+ movl 60(%edi), %ecx
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ sbbl %ecx, -92(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+# ... then add the corrected middle term into z[4..11] and propagate the
+# carry through z[12..15].
+ addl -100(%ebp), %eax # 4-byte Folded Reload
+ adcl -112(%ebp), %ebx # 4-byte Folded Reload
+ movl %eax, 16(%edi)
+ movl -96(%ebp), %eax # 4-byte Reload
+ adcl -104(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 20(%edi)
+ adcl -108(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 24(%edi)
+ movl -84(%ebp), %eax # 4-byte Reload
+ adcl -132(%ebp), %eax # 4-byte Folded Reload
+ movl %edx, 28(%edi)
+ movl -80(%ebp), %ecx # 4-byte Reload
+ adcl -136(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 32(%edi)
+ movl -88(%ebp), %eax # 4-byte Reload
+ adcl -128(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 36(%edi)
+ movl -92(%ebp), %ecx # 4-byte Reload
+ adcl -140(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 40(%edi)
+ adcl -144(%ebp), %esi # 4-byte Folded Reload
+ movl %ecx, 44(%edi)
+ movl %esi, 48(%edi)
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 52(%edi)
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 56(%edi)
+ movl -124(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 60(%edi)
+ addl $156, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end113:
+ .size mcl_fpDbl_sqrPre8L, .Lfunc_end113-mcl_fpDbl_sqrPre8L
+
+ .globl mcl_fp_mont8L
+ .align 16, 0x90
+ .type mcl_fp_mont8L,@function
+# void mcl_fp_mont8L(uint32_t z[8], const uint32_t x[8], const uint32_t y[8],
+#                    const uint32_t p[8])
+# i386 cdecl, PIC.  8-limb (256-bit) Montgomery multiplication, fully
+# unrolled (CIOS-style): for each limb y[i] it adds x*y[i] to the running
+# accumulator, then computes m = t0 * inv (inv loaded from -4(p)) and adds
+# m*p, dropping the now-zero low limb.  Ends with a conditional final
+# subtraction of p.  Both partial products go through .LmulPv256x32
+# (%edx = limb source, %ecx = 9-limb output buffer, (%esp) = multiplier).
+# Stack args after 4 pushes + 700-byte frame: 720(%esp)=z, 724(%esp)=x,
+# 728(%esp)=y, 732(%esp)=p.
+# NOTE(review): -4(p) is presumably inv = -p^{-1} mod 2^32, stored just
+# before the modulus limbs — confirm against the mcl C glue that builds p.
+mcl_fp_mont8L: # @mcl_fp_mont8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $700, %esp # imm = 0x2BC
+ calll .L114$pb
+.L114$pb:
+ popl %ebx
+.Ltmp5:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp5-.L114$pb), %ebx
+ movl 732(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+# Iteration 0: t = x * y[0]; m = t0 * inv; t = (t + m*p) >> 32.
+ movl 728(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 664(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 664(%esp), %ebp
+ movl 668(%esp), %edi
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 696(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 692(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 688(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 684(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 680(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 676(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 672(%esp), %esi
+ movl %eax, (%esp)
+ leal 624(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 624(%esp), %ebp
+ adcl 628(%esp), %edi
+ adcl 632(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 656(%esp), %ebp
+ sbbl %eax, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+# Iteration for y[1]; the same add-multiply/reduce pattern repeats, fully
+# unrolled, for y[1]..y[7] below (buffers step down 40 bytes per iteration).
+ movl 728(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 584(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 60(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 584(%esp), %edi
+ adcl 588(%esp), %esi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 592(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 596(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 600(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 604(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 608(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 612(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 616(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %edi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 732(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv256x32
+ andl $1, %ebp
+ addl 544(%esp), %edi
+ adcl 548(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 568(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 728(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ addl 504(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 524(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 536(%esp), %ebp
+ sbbl %edi, %edi
+ movl %esi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 464(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 464(%esp), %esi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 468(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 472(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 480(%esp), %edi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 484(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 488(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 492(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 496(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 724(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv256x32
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 424(%esp), %ecx
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl 428(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 436(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 444(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 448(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl 40(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 384(%esp), %esi
+ adcl 388(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 392(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 396(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 400(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 404(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 408(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 412(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 416(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 28(%esp), %ecx # 4-byte Reload
+ addl 344(%esp), %ecx
+ adcl 348(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 352(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 364(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 372(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl %ebp, %eax
+ andl $1, %eax
+ addl 304(%esp), %edi
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 308(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 312(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 316(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 320(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 324(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 328(%esp), %esi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 332(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 336(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 264(%esp), %ecx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 272(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 276(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 284(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 292(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 224(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 228(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 232(%esp), %esi
+ adcl 236(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 240(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 244(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 248(%esp), %ebp
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 252(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 256(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 184(%esp), %ecx
+ adcl 188(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 196(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 204(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl 212(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 144(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 152(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 160(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 172(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 176(%esp), %ebp
+ adcl $0, %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 104(%esp), %ecx
+ adcl 108(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 116(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 120(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl 128(%esp), %edi
+ adcl 132(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl 24(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %ebp
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ andl $1, %esi
+ addl 64(%esp), %ebp
+ movl 32(%esp), %ebx # 4-byte Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 72(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 76(%esp), %ebx
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 88(%esp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl $0, %esi
+# Final reduction: compute t - p limb by limb; the borrow (kept in %esi/%cl)
+# selects, per limb, the original t value (borrow) or the difference.
+ movl %eax, %edx
+ movl 732(%esp), %ebp
+ subl (%ebp), %edx
+ movl %ecx, %eax
+ sbbl 4(%ebp), %eax
+ movl %ebx, %ecx
+ sbbl 8(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ sbbl 20(%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ sbbl $0, %esi
+ andl $1, %esi
+ movl %esi, %ecx
+ jne .LBB114_2
+# BB#1:
+ movl %edx, %ebp
+.LBB114_2:
+ movl 720(%esp), %edx
+ movl %ebp, (%edx)
+ testb %cl, %cl
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB114_4
+# BB#3:
+ movl %eax, %ebp
+.LBB114_4:
+ movl %ebp, 4(%edx)
+ jne .LBB114_6
+# BB#5:
+ movl 12(%esp), %ebx # 4-byte Reload
+.LBB114_6:
+ movl %ebx, 8(%edx)
+ movl 28(%esp), %eax # 4-byte Reload
+ jne .LBB114_8
+# BB#7:
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+.LBB114_8:
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%edx)
+ movl 40(%esp), %edi # 4-byte Reload
+ jne .LBB114_10
+# BB#9:
+ movl 20(%esp), %edi # 4-byte Reload
+.LBB114_10:
+ movl %edi, 16(%edx)
+ jne .LBB114_12
+# BB#11:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB114_12:
+ movl %eax, 20(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ jne .LBB114_14
+# BB#13:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB114_14:
+ movl %eax, 24(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ jne .LBB114_16
+# BB#15:
+ movl 52(%esp), %eax # 4-byte Reload
+.LBB114_16:
+ movl %eax, 28(%edx)
+ addl $700, %esp # imm = 0x2BC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end114:
+ .size mcl_fp_mont8L, .Lfunc_end114-mcl_fp_mont8L
+
+ .globl mcl_fp_montNF8L
+ .align 16, 0x90
+ .type mcl_fp_montNF8L,@function
+mcl_fp_montNF8L: # @mcl_fp_montNF8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $700, %esp # imm = 0x2BC
+ calll .L115$pb
+.L115$pb:
+ popl %ebx
+.Ltmp6:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp6-.L115$pb), %ebx
+ movl 732(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 664(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 664(%esp), %ebp
+ movl 668(%esp), %edi
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 696(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 692(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 688(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 684(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 680(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 676(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 672(%esp), %esi
+ movl %eax, (%esp)
+ leal 624(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 624(%esp), %ebp
+ adcl 628(%esp), %edi
+ adcl 632(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 640(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 584(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 616(%esp), %ecx
+ addl 584(%esp), %edi
+ adcl 588(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 596(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 604(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 732(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv256x32
+ addl 544(%esp), %edi
+ adcl 548(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 564(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 572(%esp), %edi
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 576(%esp), %ebp
+ movl 728(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 536(%esp), %ecx
+ addl 504(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 528(%esp), %edi
+ adcl 532(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 464(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 464(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 472(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 484(%esp), %esi
+ adcl 488(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 496(%esp), %edi
+ movl 728(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 456(%esp), %eax
+ movl 44(%esp), %edx # 4-byte Reload
+ addl 424(%esp), %edx
+ adcl 428(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 432(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 436(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 440(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 448(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 452(%esp), %edi
+ movl %edi, %ebp
+ movl %eax, %edi
+ adcl $0, %edi
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 384(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 396(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 412(%esp), %ebp
+ adcl 416(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 376(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 344(%esp), %ecx
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 352(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 360(%esp), %esi
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 364(%esp), %edi
+ adcl 368(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 304(%esp), %ebp
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 308(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 320(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 324(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 328(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 336(%esp), %edi
+ movl 728(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 724(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv256x32
+ movl 296(%esp), %edx
+ movl %ebp, %ecx
+ addl 264(%esp), %ecx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 280(%esp), %ebp
+ adcl 284(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 292(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl %edx, %edi
+ adcl $0, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 224(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 236(%esp), %esi
+ adcl 240(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 256(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 216(%esp), %ebp
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 184(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 192(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 196(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 144(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 156(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 160(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 176(%esp), %ebp
+ movl 728(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 724(%esp), %edx
+ calll .LmulPv256x32
+ movl 136(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 104(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 116(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 120(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 128(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 132(%esp), %ebp
+ adcl $0, %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 732(%esp), %edx
+ calll .LmulPv256x32
+ addl 64(%esp), %esi
+ movl 32(%esp), %esi # 4-byte Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl 68(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 72(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 76(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 80(%esp), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 92(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl 96(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 732(%esp), %eax
+ subl (%eax), %edx
+ sbbl 4(%eax), %ecx
+ sbbl 8(%eax), %esi
+ sbbl 12(%eax), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ sbbl 16(%eax), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 40(%esp), %ebx # 4-byte Reload
+ sbbl 20(%eax), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ sbbl 24(%eax), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ sbbl 28(%eax), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ testl %edi, %edi
+ js .LBB115_2
+# BB#1:
+ movl %edx, 56(%esp) # 4-byte Spill
+.LBB115_2:
+ movl 720(%esp), %edx
+ movl 56(%esp), %eax # 4-byte Reload
+ movl %eax, (%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB115_4
+# BB#3:
+ movl %ecx, %eax
+.LBB115_4:
+ movl %eax, 4(%edx)
+ js .LBB115_6
+# BB#5:
+ movl %esi, 32(%esp) # 4-byte Spill
+.LBB115_6:
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%edx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl 60(%esp), %eax # 4-byte Reload
+ movl 48(%esp), %ecx # 4-byte Reload
+ js .LBB115_8
+# BB#7:
+ movl 12(%esp), %esi # 4-byte Reload
+ movl %esi, 44(%esp) # 4-byte Spill
+.LBB115_8:
+ movl 44(%esp), %esi # 4-byte Reload
+ movl %esi, 12(%edx)
+ js .LBB115_10
+# BB#9:
+ movl 16(%esp), %edi # 4-byte Reload
+.LBB115_10:
+ movl %edi, 16(%edx)
+ js .LBB115_12
+# BB#11:
+ movl 20(%esp), %ebp # 4-byte Reload
+.LBB115_12:
+ movl %ebp, 20(%edx)
+ js .LBB115_14
+# BB#13:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB115_14:
+ movl %eax, 24(%edx)
+ js .LBB115_16
+# BB#15:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB115_16:
+ movl %ecx, 28(%edx)
+ addl $700, %esp # imm = 0x2BC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end115:
+ .size mcl_fp_montNF8L, .Lfunc_end115-mcl_fp_montNF8L
+
+ .globl mcl_fp_montRed8L
+ .align 16, 0x90
+ .type mcl_fp_montRed8L,@function
+#-----------------------------------------------------------------------
+# void mcl_fp_montRed8L(uint32_t z[8], uint32_t xy[16], const uint32_t p[9ish])
+# Montgomery reduction of a 16-limb (512-bit) value xy modulo the 8-limb
+# prime p, writing the reduced 8-limb result to z.
+# ABI:   cdecl (i386 SysV); after the 4 register pushes and the 428-byte
+#        frame, the arguments live at 448(z) / 452(xy) / 456(p) (%esp).
+# Uses:  .LmulPv256x32 helper (256-bit x 32-bit multiply) via the GOT.
+# NOTE(review): -4(p) is read as the Montgomery constant np = -p^{-1}
+# mod 2^32 -- this matches mcl's FpGenerator layout; confirm with caller.
+# Algorithm: 8 rounds of  q = x[i]*np mod 2^32;  x += q*p << 32*i
+# (each round zeroes the lowest live limb), followed by a conditional
+# subtraction of p selected by the final borrow.
+#-----------------------------------------------------------------------
+mcl_fp_montRed8L: # @mcl_fp_montRed8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $428, %esp # imm = 0x1AC
+ # PIC preamble: %ebx = GOT base, required by the .LmulPv256x32 calls.
+ calll .L116$pb
+.L116$pb:
+ popl %ebx
+.Ltmp7:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp7-.L116$pb), %ebx
+ movl 456(%esp), %edx
+ movl -4(%edx), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 452(%esp), %eax
+ movl (%eax), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 4(%eax), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ # First quotient digit: q0 = x[0] * np (mod 2^32).
+ movl %esi, %ecx
+ imull %edi, %ecx
+ # Cache all 16 input limbs x[2..15] in spill slots / registers.
+ movl 60(%eax), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 56(%eax), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 52(%eax), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 48(%eax), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 44(%eax), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 40(%eax), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 36(%eax), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 32(%eax), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 28(%eax), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 24(%eax), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 20(%eax), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 16(%eax), %ebp
+ movl 12(%eax), %edi
+ movl 8(%eax), %esi
+ # Cache the 8 prime limbs p[0..7] for the final subtraction.
+ movl (%edx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 20(%edx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 16(%edx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 12(%edx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 8(%edx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 4(%edx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ # Round 0: fold q0*p into x; x[0] is annihilated, window shifts down.
+ movl %ecx, (%esp)
+ leal 392(%esp), %ecx
+ calll .LmulPv256x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 392(%esp), %eax
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 396(%esp), %ecx
+ adcl 400(%esp), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 404(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 408(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ # Propagate the carry through the untouched upper limbs.
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ sbbl %eax, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ # Round 1: q1 = limb * np; fold q1*p, window shifts down one limb.
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ movl 64(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 352(%esp), %edi
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 356(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 360(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 364(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 368(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 372(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 376(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 380(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 384(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ # Round 2.
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 312(%esp), %edi
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 316(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ # Round 3.
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 272(%esp), %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 276(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ # Round 4.
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 232(%esp), %ebp
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 236(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 252(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ # Round 5.
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 192(%esp), %edi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 196(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 204(%esp), %edi
+ adcl 208(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 212(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ # Round 6.
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 152(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 152(%esp), %esi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 156(%esp), %ecx
+ adcl 160(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 168(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 172(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 180(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 64(%esp) # 4-byte Folded Spill
+ # Round 7 (final): after this fold the live value fits in 8 limbs + carry.
+ movl 60(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 456(%esp), %edx
+ calll .LmulPv256x32
+ addl 112(%esp), %esi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 116(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 120(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 128(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl %edi, %ebx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl %eax, %esi
+ adcl 136(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ # Trial subtraction: (result - p); %edi accumulates the borrow flag.
+ movl %ecx, %edx
+ subl 24(%esp), %edx # 4-byte Folded Reload
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 20(%esp), %eax # 4-byte Folded Reload
+ movl 104(%esp), %ebp # 4-byte Reload
+ sbbl 28(%esp), %ebp # 4-byte Folded Reload
+ sbbl 32(%esp), %ebx # 4-byte Folded Reload
+ sbbl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ sbbl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ sbbl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ sbbl 48(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 96(%esp) # 4-byte Spill
+ sbbl $0, %edi
+ andl $1, %edi
+ # Per-limb select: borrow set -> keep unsubtracted value, else use diff.
+ jne .LBB116_2
+# BB#1:
+ movl %edx, %ecx
+.LBB116_2:
+ movl 448(%esp), %edx
+ movl %ecx, (%edx)
+ movl %edi, %ecx
+ testb %cl, %cl
+ jne .LBB116_4
+# BB#3:
+ movl %eax, 108(%esp) # 4-byte Spill
+.LBB116_4:
+ movl 108(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%edx)
+ movl 104(%esp), %eax # 4-byte Reload
+ jne .LBB116_6
+# BB#5:
+ movl %ebp, %eax
+.LBB116_6:
+ movl %eax, 8(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ jne .LBB116_8
+# BB#7:
+ movl %ebx, %ebp
+.LBB116_8:
+ movl %ebp, 12(%edx)
+ movl 100(%esp), %ebx # 4-byte Reload
+ jne .LBB116_10
+# BB#9:
+ movl 68(%esp), %ebx # 4-byte Reload
+.LBB116_10:
+ movl %ebx, 16(%edx)
+ movl 80(%esp), %edi # 4-byte Reload
+ jne .LBB116_12
+# BB#11:
+ movl 72(%esp), %edi # 4-byte Reload
+.LBB116_12:
+ movl %edi, 20(%edx)
+ movl 88(%esp), %esi # 4-byte Reload
+ jne .LBB116_14
+# BB#13:
+ movl 92(%esp), %esi # 4-byte Reload
+.LBB116_14:
+ movl %esi, 24(%edx)
+ jne .LBB116_16
+# BB#15:
+ movl 96(%esp), %eax # 4-byte Reload
+.LBB116_16:
+ movl %eax, 28(%edx)
+ addl $428, %esp # imm = 0x1AC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end116:
+ .size mcl_fp_montRed8L, .Lfunc_end116-mcl_fp_montRed8L
+
+ .globl mcl_fp_addPre8L
+ .align 16, 0x90
+ .type mcl_fp_addPre8L,@function
+#-----------------------------------------------------------------------
+# uint32_t mcl_fp_addPre8L(uint32_t z[8], const uint32_t x[8], const uint32_t y[8])
+# Raw 256-bit addition z = x + y (no modular reduction).
+# ABI:   cdecl (i386); args at 16(z) / 20(x) / 24(y) (%esp) after pushes.
+# Out:   %eax = carry out (0 or 1).
+# The addl/adcl sequence is one unbroken carry chain -- instruction
+# order between the first addl and the final sbbl is significant.
+#-----------------------------------------------------------------------
+mcl_fp_addPre8L: # @mcl_fp_addPre8L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ movl 20(%esp), %esi
+ addl (%esi), %ecx
+ adcl 4(%esi), %edx
+ movl 8(%eax), %edi
+ adcl 8(%esi), %edi
+ movl 16(%esp), %ebx
+ movl %ecx, (%ebx)
+ movl 12(%esi), %ecx
+ movl %edx, 4(%ebx)
+ movl 16(%esi), %edx
+ adcl 12(%eax), %ecx
+ adcl 16(%eax), %edx
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %ecx, 12(%ebx)
+ movl 20(%esi), %ecx
+ adcl %edi, %ecx
+ movl 24(%eax), %edi
+ movl %edx, 16(%ebx)
+ movl 24(%esi), %edx
+ adcl %edi, %edx
+ movl %ecx, 20(%ebx)
+ movl %edx, 24(%ebx)
+ movl 28(%eax), %eax
+ movl 28(%esi), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 28(%ebx)
+ # Materialize the final carry as 0/1 in %eax.
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end117:
+ .size mcl_fp_addPre8L, .Lfunc_end117-mcl_fp_addPre8L
+
+ .globl mcl_fp_subPre8L
+ .align 16, 0x90
+ .type mcl_fp_subPre8L,@function
+#-----------------------------------------------------------------------
+# uint32_t mcl_fp_subPre8L(uint32_t z[8], const uint32_t x[8], const uint32_t y[8])
+# Raw 256-bit subtraction z = x - y (no modular correction).
+# ABI:   cdecl (i386); args at 20(z) / 24(x) / 28(y) (%esp) after pushes.
+# Out:   %eax = borrow out (0 or 1).
+# subl/sbbl form one unbroken borrow chain; the interleaved movl stores
+# are flag-neutral and may not be reordered around the sbbl steps.
+#-----------------------------------------------------------------------
+mcl_fp_subPre8L: # @mcl_fp_subPre8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %esi
+ # %eax = 0 now; the closing "sbbl $0, %eax" turns CF into -borrow.
+ xorl %eax, %eax
+ movl 28(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %esi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edi), %ebx
+ movl 20(%esp), %ebp
+ movl %edx, (%ebp)
+ movl 12(%ecx), %edx
+ sbbl 12(%edi), %edx
+ movl %esi, 4(%ebp)
+ movl 16(%ecx), %esi
+ sbbl 16(%edi), %esi
+ movl %ebx, 8(%ebp)
+ movl 20(%edi), %ebx
+ movl %edx, 12(%ebp)
+ movl 20(%ecx), %edx
+ sbbl %ebx, %edx
+ movl 24(%edi), %ebx
+ movl %esi, 16(%ebp)
+ movl 24(%ecx), %esi
+ sbbl %ebx, %esi
+ movl %edx, 20(%ebp)
+ movl %esi, 24(%ebp)
+ movl 28(%edi), %edx
+ movl 28(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 28(%ebp)
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end118:
+ .size mcl_fp_subPre8L, .Lfunc_end118-mcl_fp_subPre8L
+
+ .globl mcl_fp_shr1_8L
+ .align 16, 0x90
+ .type mcl_fp_shr1_8L,@function
+#-----------------------------------------------------------------------
+# void mcl_fp_shr1_8L(uint32_t z[8], const uint32_t x[8])
+# Logical right shift of a 256-bit value by one bit: z = x >> 1.
+# ABI:   cdecl (i386); args at 8(z) / 12(x) (%esp) after the push.
+# Each shrdl shifts the low bit of the NEXT limb into the top of the
+# current one; the final plain shrl zero-fills the top limb.
+#-----------------------------------------------------------------------
+mcl_fp_shr1_8L: # @mcl_fp_shr1_8L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 8(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 4(%esi)
+ movl 12(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 8(%esi)
+ movl 16(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%esi)
+ movl 20(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 16(%esi)
+ movl 24(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 20(%esi)
+ movl 28(%eax), %eax
+ shrdl $1, %eax, %ecx
+ movl %ecx, 24(%esi)
+ shrl %eax
+ movl %eax, 28(%esi)
+ popl %esi
+ retl
+.Lfunc_end119:
+ .size mcl_fp_shr1_8L, .Lfunc_end119-mcl_fp_shr1_8L
+
+ .globl mcl_fp_add8L
+ .align 16, 0x90
+ .type mcl_fp_add8L,@function
+#-----------------------------------------------------------------------
+# void mcl_fp_add8L(uint32_t z[8], const uint32_t x[8], const uint32_t y[8], const uint32_t p[8])
+# Modular addition z = (x + y) mod p for 8-limb operands.
+# ABI:   cdecl (i386); after pushes + 20-byte frame the args sit at
+#        40(z) / 44(x) / 48(y) / 52(p) (%esp).
+# Strategy: store the raw sum to z first, then compute sum - p; if
+# neither the addition nor the trial subtraction carried out, overwrite
+# z with the reduced value (the %nocarry block).  If a carry remains,
+# the already-stored raw sum is the correct (< p) result.
+#-----------------------------------------------------------------------
+mcl_fp_add8L: # @mcl_fp_add8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 48(%esp), %edi
+ movl (%edi), %ecx
+ movl 4(%edi), %eax
+ movl 44(%esp), %edx
+ # 256-bit add: unbroken addl/adcl carry chain over all 8 limbs.
+ addl (%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl 4(%edx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 8(%edi), %eax
+ adcl 8(%edx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 12(%edx), %esi
+ movl 16(%edx), %eax
+ adcl 12(%edi), %esi
+ adcl 16(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ adcl 20(%edi), %ecx
+ movl 24(%edx), %ebx
+ adcl 24(%edi), %ebx
+ movl 28(%edx), %edi
+ movl 48(%esp), %edx
+ adcl 28(%edx), %edi
+ # Store the raw (possibly unreduced) sum to z.
+ movl 40(%esp), %edx
+ movl %ebp, (%edx)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%edx)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%edx)
+ movl %esi, 12(%edx)
+ movl %eax, 16(%edx)
+ movl %ecx, 20(%edx)
+ movl %ebx, 24(%edx)
+ movl %edi, 28(%edx)
+ # %eax = carry out of the addition (0 or 1).
+ sbbl %eax, %eax
+ andl $1, %eax
+ # Trial subtraction of p (borrow chain continues into "sbbl $0, %eax").
+ movl 52(%esp), %edx
+ movl 8(%esp), %ebp # 4-byte Reload
+ subl (%edx), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl 52(%esp), %edx
+ sbbl 4(%edx), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 52(%esp), %edx
+ sbbl 8(%edx), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp
+ sbbl 12(%ebp), %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl 4(%esp), %edx # 4-byte Reload
+ sbbl 16(%ebp), %edx
+ movl %edx, %esi
+ sbbl 20(%ebp), %ecx
+ sbbl 24(%ebp), %ebx
+ sbbl 28(%ebp), %edi
+ sbbl $0, %eax
+ testb $1, %al
+ jne .LBB120_2
+# BB#1: # %nocarry
+ # No net borrow: sum >= p, so overwrite z with the reduced sum - p.
+ movl 8(%esp), %edx # 4-byte Reload
+ movl 40(%esp), %ebp
+ movl %edx, (%ebp)
+ movl 16(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%ebp)
+ movl 12(%esp), %edx # 4-byte Reload
+ movl %edx, 8(%ebp)
+ movl (%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebp)
+ movl %esi, 16(%ebp)
+ movl %ecx, 20(%ebp)
+ movl %ebx, 24(%ebp)
+ movl %edi, 28(%ebp)
+.LBB120_2: # %carry
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end120:
+ .size mcl_fp_add8L, .Lfunc_end120-mcl_fp_add8L
+
+ .globl mcl_fp_addNF8L
+ .align 16, 0x90
+ .type mcl_fp_addNF8L,@function
+#-----------------------------------------------------------------------
+# void mcl_fp_addNF8L(uint32_t z[8], const uint32_t x[8], const uint32_t y[8], const uint32_t p[8])
+# Modular addition z = (x + y) mod p, "NF" (no-final-carry) variant:
+# the sum is assumed to fit in 256 bits, so reduction is decided by the
+# SIGN of (sum - p) rather than by a carry-out flag.
+# ABI:   cdecl (i386); after pushes + 56-byte frame the args sit at
+#        76(z) / 80(x) / 84(y) / 88(p) (%esp).
+# Flow:  full 8-limb add -> full 8-limb trial subtract of p -> per-limb
+# cmov-style selection keyed on the sign flag of the top-limb sbbl.
+#-----------------------------------------------------------------------
+mcl_fp_addNF8L: # @mcl_fp_addNF8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 84(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edi
+ movl 80(%esp), %ebx
+ # 256-bit addition x + y; carry chain must stay unbroken.
+ addl (%ebx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 4(%ebx), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 28(%eax), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 20(%eax), %ebp
+ movl 16(%eax), %esi
+ movl 12(%eax), %edx
+ movl 8(%eax), %ecx
+ adcl 8(%ebx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 12(%ebx), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 16(%ebx), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 20(%ebx), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 24(%ebx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 28(%ebx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ # Trial subtraction sum - p; sign of the top limb picks the result.
+ movl 88(%esp), %ebx
+ movl 24(%esp), %ebp # 4-byte Reload
+ movl %ebp, %eax
+ subl (%ebx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 4(%ebx), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ sbbl 8(%ebx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 12(%ebx), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 20(%ebx), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ sbbl 24(%ebx), %ebp
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 28(%ebx), %esi
+ # SF of the last sbbl decides every limb: negative -> keep raw sum,
+ # non-negative -> use sum - p.  Each js branch below repeats that test.
+ testl %esi, %esi
+ js .LBB121_2
+# BB#1:
+ movl (%esp), %eax # 4-byte Reload
+.LBB121_2:
+ movl 76(%esp), %ebx
+ movl %eax, (%ebx)
+ movl 32(%esp), %eax # 4-byte Reload
+ js .LBB121_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB121_4:
+ movl %eax, 4(%ebx)
+ movl 40(%esp), %edx # 4-byte Reload
+ movl 28(%esp), %edi # 4-byte Reload
+ js .LBB121_6
+# BB#5:
+ movl 8(%esp), %edi # 4-byte Reload
+.LBB121_6:
+ movl %edi, 8(%ebx)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB121_8
+# BB#7:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB121_8:
+ movl %eax, 12(%ebx)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB121_10
+# BB#9:
+ movl 16(%esp), %edx # 4-byte Reload
+.LBB121_10:
+ movl %edx, 16(%ebx)
+ js .LBB121_12
+# BB#11:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB121_12:
+ movl %ecx, 20(%ebx)
+ js .LBB121_14
+# BB#13:
+ movl %ebp, %eax
+.LBB121_14:
+ movl %eax, 24(%ebx)
+ js .LBB121_16
+# BB#15:
+ movl %esi, %edi
+.LBB121_16:
+ movl %edi, 28(%ebx)
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end121:
+ .size mcl_fp_addNF8L, .Lfunc_end121-mcl_fp_addNF8L
+
+ .globl mcl_fp_sub8L
+ .align 16, 0x90
+ .type mcl_fp_sub8L,@function
+#-----------------------------------------------------------------------
+# void mcl_fp_sub8L(uint32_t z[8], const uint32_t x[8], const uint32_t y[8], const uint32_t p[8])
+# Modular subtraction z = (x - y) mod p for 8-limb operands.
+# ABI:   cdecl (i386); after pushes + 28-byte frame the args sit at
+#        48(z) / 52(x) / 56(y) / 60(p) (%esp).
+# Strategy: store the raw difference to z; if the subtraction borrowed
+# (x < y), fall into the %carry block and add p back in place.
+#-----------------------------------------------------------------------
+mcl_fp_sub8L: # @mcl_fp_sub8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+ movl 52(%esp), %esi
+ movl (%esi), %ecx
+ movl 4(%esi), %eax
+ # %ebx = 0; "sbbl $0, %ebx" below captures the final borrow in bit 0.
+ xorl %ebx, %ebx
+ movl 56(%esp), %ebp
+ # 256-bit subtract x - y: unbroken subl/sbbl borrow chain.
+ subl (%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ sbbl 4(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 8(%esi), %edx
+ sbbl 8(%ebp), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 16(%esi), %ecx
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 24(%esi), %edi
+ sbbl 24(%ebp), %edi
+ movl 28(%esi), %esi
+ sbbl 28(%ebp), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ # Store the raw difference to z unconditionally.
+ movl 48(%esp), %ebx
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%ebx)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%ebx)
+ movl %edx, 8(%ebx)
+ movl 24(%esp), %edx # 4-byte Reload
+ movl %edx, 12(%ebx)
+ movl %ecx, 16(%ebx)
+ movl %eax, 20(%ebx)
+ movl %edi, 24(%ebx)
+ movl %esi, 28(%ebx)
+ je .LBB122_2
+# BB#1: # %carry
+ # Borrow occurred (x < y): correct the result in place, z = z + p.
+ movl %esi, (%esp) # 4-byte Spill
+ movl 60(%esp), %esi
+ movl 16(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 8(%esp), %ebp # 4-byte Reload
+ adcl 8(%esi), %ebp
+ movl 12(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl %eax, 20(%ebx)
+ movl 24(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 24(%ebx)
+ movl 28(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+.LBB122_2: # %nocarry
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end122:
+ .size mcl_fp_sub8L, .Lfunc_end122-mcl_fp_sub8L
+
+# -----------------------------------------------------------------------------
+# mcl_fp_subNF8L(z, x, y, p) -- 8-limb modular subtraction, branch-free.
+# Stack args after the 4 pushes + subl $40 frame: 60(%esp)=z, 64(%esp)=x,
+# 68(%esp)=y, 72(%esp)=p (presumably the modulus; TODO confirm).
+# Strategy: compute d = x - y with an sbbl chain, then sign-extend the top
+# limb of d (sarl $31) into an all-ones/all-zero mask, AND the mask with each
+# limb of p, and add the masked modulus onto d.  This adds p back exactly when
+# the subtraction went negative, with no data-dependent branch ("NF" path).
+# Compiler-generated (LLVM); the sbbl and adcl chains must not be reordered.
+# -----------------------------------------------------------------------------
+ .globl mcl_fp_subNF8L
+ .align 16, 0x90
+ .type mcl_fp_subNF8L,@function
+mcl_fp_subNF8L: # @mcl_fp_subNF8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %eax # eax = x
+ movl (%eax), %esi
+ movl 4(%eax), %edx
+ movl 68(%esp), %ecx # ecx = y
+ subl (%ecx), %esi # limb 0: starts the borrow chain
+ movl %esi, 24(%esp) # 4-byte Spill
+ sbbl 4(%ecx), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 28(%eax), %edx
+ movl 24(%eax), %esi
+ movl 20(%eax), %edi
+ movl 16(%eax), %ebx
+ movl 12(%eax), %ebp
+ movl 8(%eax), %eax
+ sbbl 8(%ecx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl 12(%ecx), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ sbbl 16(%ecx), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ sbbl 20(%ecx), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ sbbl 24(%ecx), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl %edx, %edi
+ sbbl 28(%ecx), %edi # limb 7 of the difference
+ movl %edi, 36(%esp) # 4-byte Spill
+ sarl $31, %edi # edi = 0xFFFFFFFF if difference < 0, else 0
+ movl 72(%esp), %ebp # ebp = p
+ movl 28(%ebp), %eax # mask each modulus limb with edi
+ andl %edi, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 24(%ebp), %eax
+ andl %edi, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 20(%ebp), %ebx
+ andl %edi, %ebx
+ movl 16(%ebp), %esi
+ andl %edi, %esi
+ movl 12(%ebp), %edx
+ andl %edi, %edx
+ movl 8(%ebp), %ecx
+ andl %edi, %ecx
+ movl 4(%ebp), %eax
+ andl %edi, %eax
+ andl (%ebp), %edi
+ addl 24(%esp), %edi # 4-byte Folded Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl 60(%esp), %ebp # ebp = z; add masked p and store result
+ movl %edi, (%ebp)
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 4(%ebp)
+ adcl 12(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 8(%ebp)
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 12(%ebp)
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 16(%ebp)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 20(%ebp)
+ movl %eax, 24(%ebp)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ebp)
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end123:
+ .size mcl_fp_subNF8L, .Lfunc_end123-mcl_fp_subNF8L
+
+# -----------------------------------------------------------------------------
+# mcl_fpDbl_add8L(z, x, y, p) -- 16-limb (double-width) addition with a
+# conditional reduction of the high half.  i386 cdecl; stack args after the
+# 4 pushes + subl $56 frame: 76(%esp)=z (16 limbs), 80(%esp)=x, 84(%esp)=y,
+# 88(%esp)=p (presumably the 8-limb modulus; TODO confirm).
+# Strategy: add all 16 limbs with an addl/adcl chain.  The low 8 limbs are
+# stored as-is; from the high 8 limbs the modulus is tentatively subtracted
+# and the smaller of {high, high - p} is selected limb-by-limb via jne
+# branches keyed on the final borrow.  Compiler-generated (LLVM).
+# -----------------------------------------------------------------------------
+ .globl mcl_fpDbl_add8L
+ .align 16, 0x90
+ .type mcl_fpDbl_add8L,@function
+mcl_fpDbl_add8L: # @mcl_fpDbl_add8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 84(%esp), %ecx # ecx = y
+ movl (%ecx), %esi
+ movl 4(%ecx), %edx
+ movl 80(%esp), %ebp # ebp = x
+ addl (%ebp), %esi # limb 0: starts the carry chain
+ adcl 4(%ebp), %edx
+ movl 8(%ecx), %edi
+ adcl 8(%ebp), %edi
+ movl 12(%ebp), %ebx
+ movl 76(%esp), %eax # eax = z
+ movl %esi, (%eax) # low limbs are stored unreduced
+ movl 16(%ebp), %esi
+ adcl 12(%ecx), %ebx
+ adcl 16(%ecx), %esi
+ movl %edx, 4(%eax)
+ movl 40(%ecx), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %edi, 8(%eax)
+ movl 20(%ecx), %edx
+ movl %ebx, 12(%eax)
+ movl 20(%ebp), %edi
+ adcl %edx, %edi
+ movl 24(%ecx), %edx
+ movl %esi, 16(%eax)
+ movl 24(%ebp), %esi
+ adcl %edx, %esi
+ movl 28(%ecx), %edx
+ movl %edi, 20(%eax)
+ movl 28(%ebp), %ebx
+ adcl %edx, %ebx
+ movl 32(%ecx), %edx
+ movl %esi, 24(%eax)
+ movl 32(%ebp), %esi # limbs 8..15: kept in spills for reduction
+ adcl %edx, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 36(%ecx), %edx
+ movl %ebx, 28(%eax)
+ movl 36(%ebp), %ebx
+ adcl %edx, %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 40(%ebp), %eax
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl 44(%ebp), %edi
+ adcl %edx, %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 48(%ecx), %edx
+ movl 48(%ebp), %eax
+ adcl %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 52(%ecx), %edx
+ movl 52(%ebp), %esi
+ adcl %edx, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 56(%ecx), %edx
+ movl 56(%ebp), %eax
+ adcl %edx, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%ecx), %ecx
+ movl 60(%ebp), %ebp
+ adcl %ecx, %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ sbbl %ecx, %ecx # ecx = -carry out of the 16-limb add
+ andl $1, %ecx
+# Tentatively subtract p from the high 8 limbs (borrow chain in sbbl).
+ movl 44(%esp), %eax # 4-byte Reload
+ movl 88(%esp), %edx # edx = p
+ subl (%edx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ sbbl 4(%eax), %ebx
+ movl %eax, %edx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ movl %edx, %ebx
+ sbbl 8(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %eax
+ movl 24(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl %edi, %eax
+ sbbl 16(%ebx), %eax
+ sbbl 20(%ebx), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ sbbl 24(%ebx), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ sbbl 28(%ebx), %ebp
+ sbbl $0, %ecx # fold the add's carry into the borrow test
+ andl $1, %ecx # ecx != 0 -> high half < p, keep unreduced value
+ jne .LBB124_2
+# BB#1:
+ movl %eax, %edi
+.LBB124_2:
+ testb %cl, %cl
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB124_4
+# BB#3:
+ movl (%esp), %ecx # 4-byte Reload
+.LBB124_4:
+# Store the selected (reduced or unreduced) high limbs into z[8..15].
+ movl 76(%esp), %eax
+ movl %ecx, 32(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl 32(%esp), %edx # 4-byte Reload
+ movl 48(%esp), %esi # 4-byte Reload
+ movl 28(%esp), %ebx # 4-byte Reload
+ jne .LBB124_6
+# BB#5:
+ movl 4(%esp), %ebx # 4-byte Reload
+.LBB124_6:
+ movl %ebx, 36(%eax)
+ jne .LBB124_8
+# BB#7:
+ movl 8(%esp), %esi # 4-byte Reload
+.LBB124_8:
+ movl %esi, 40(%eax)
+ movl 36(%esp), %esi # 4-byte Reload
+ jne .LBB124_10
+# BB#9:
+ movl 12(%esp), %edx # 4-byte Reload
+.LBB124_10:
+ movl %edx, 44(%eax)
+ movl %edi, 48(%eax)
+ movl 52(%esp), %edx # 4-byte Reload
+ jne .LBB124_12
+# BB#11:
+ movl 16(%esp), %esi # 4-byte Reload
+.LBB124_12:
+ movl %esi, 52(%eax)
+ jne .LBB124_14
+# BB#13:
+ movl 20(%esp), %edx # 4-byte Reload
+.LBB124_14:
+ movl %edx, 56(%eax)
+ jne .LBB124_16
+# BB#15:
+ movl %ebp, %ecx
+.LBB124_16:
+ movl %ecx, 60(%eax)
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end124:
+ .size mcl_fpDbl_add8L, .Lfunc_end124-mcl_fpDbl_add8L
+
+# -----------------------------------------------------------------------------
+# mcl_fpDbl_sub8L(z, x, y, p) -- 16-limb (double-width) subtraction with a
+# conditional correction of the high half.  i386 cdecl; stack args after the
+# 4 pushes + subl $40 frame: 60(%esp)=z (16 limbs), 64(%esp)=x, 68(%esp)=y,
+# 72(%esp)=p (presumably the 8-limb modulus; TODO confirm).
+# Strategy: subtract all 16 limbs with a subl/sbbl chain.  The low 8 limbs
+# are stored as-is; for the high 8 limbs each modulus limb is replaced by 0
+# when no borrow occurred (jne branches keyed on the final borrow flag in
+# %eax), then added onto the high half -- i.e. p is added back only on
+# underflow.  Compiler-generated (LLVM).
+# -----------------------------------------------------------------------------
+ .globl mcl_fpDbl_sub8L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub8L,@function
+mcl_fpDbl_sub8L: # @mcl_fpDbl_sub8L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %edi # edi = x
+ movl (%edi), %eax
+ movl 4(%edi), %edx
+ movl 68(%esp), %ebx # ebx = y
+ subl (%ebx), %eax # limb 0: starts the borrow chain
+ sbbl 4(%ebx), %edx
+ movl 8(%edi), %esi
+ sbbl 8(%ebx), %esi
+ movl 60(%esp), %ecx # ecx = z
+ movl %eax, (%ecx) # low limbs are stored uncorrected
+ movl 12(%edi), %eax
+ sbbl 12(%ebx), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%edi), %edx
+ sbbl 16(%ebx), %edx
+ movl %esi, 8(%ecx)
+ movl 20(%ebx), %esi
+ movl %eax, 12(%ecx)
+ movl 20(%edi), %eax
+ sbbl %esi, %eax
+ movl 24(%ebx), %esi
+ movl %edx, 16(%ecx)
+ movl 24(%edi), %edx
+ sbbl %esi, %edx
+ movl 28(%ebx), %esi
+ movl %eax, 20(%ecx)
+ movl 28(%edi), %eax
+ sbbl %esi, %eax
+ movl 32(%ebx), %esi
+ movl %edx, 24(%ecx)
+ movl 32(%edi), %edx # limbs 8..15: kept in spills for correction
+ sbbl %esi, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 36(%ebx), %edx
+ movl %eax, 28(%ecx)
+ movl 36(%edi), %eax
+ sbbl %edx, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 40(%ebx), %eax
+ movl 40(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 44(%ebx), %eax
+ movl 44(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 48(%ebx), %eax
+ movl 48(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 52(%ebx), %eax
+ movl 52(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 56(%ebx), %eax
+ movl 56(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 60(%ebx), %eax
+ movl 60(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax # eax = 1 iff the whole subtraction borrowed
+ andl $1, %eax
+# Select p[i] (borrow) or 0 (no borrow) for each high-half correction limb.
+ movl 72(%esp), %ebx # ebx = p
+ jne .LBB125_1
+# BB#2:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB125_3
+.LBB125_1:
+ movl 28(%ebx), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+.LBB125_3:
+ testb %al, %al
+ jne .LBB125_4
+# BB#5:
+ movl $0, %ebp
+ movl $0, %eax
+ jmp .LBB125_6
+.LBB125_4:
+ movl (%ebx), %eax
+ movl 4(%ebx), %ebp
+.LBB125_6:
+ jne .LBB125_7
+# BB#8:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB125_9
+.LBB125_7:
+ movl 24(%ebx), %edx
+ movl %edx, (%esp) # 4-byte Spill
+.LBB125_9:
+ jne .LBB125_10
+# BB#11:
+ movl $0, %edx
+ jmp .LBB125_12
+.LBB125_10:
+ movl 20(%ebx), %edx
+.LBB125_12:
+ jne .LBB125_13
+# BB#14:
+ movl $0, %esi
+ jmp .LBB125_15
+.LBB125_13:
+ movl 16(%ebx), %esi
+.LBB125_15:
+ jne .LBB125_16
+# BB#17:
+ movl $0, %edi
+ jmp .LBB125_18
+.LBB125_16:
+ movl 12(%ebx), %edi
+.LBB125_18:
+ jne .LBB125_19
+# BB#20:
+ xorl %ebx, %ebx
+ jmp .LBB125_21
+.LBB125_19:
+ movl 8(%ebx), %ebx
+.LBB125_21:
+# Add the selected correction onto the high limbs and store z[8..15].
+ addl 16(%esp), %eax # 4-byte Folded Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ adcl 12(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 36(%ecx)
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 40(%ecx)
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 44(%ecx)
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 52(%ecx)
+ movl %eax, 56(%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%ecx)
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end125:
+ .size mcl_fpDbl_sub8L, .Lfunc_end125-mcl_fpDbl_sub8L
+
+# -----------------------------------------------------------------------------
+# .LmulPv288x32 -- multiply a 9-limb (288-bit) number by a 32-bit scalar.
+# Local helper with a custom convention (not cdecl):
+#   %edx      = pointer to the 9 source limbs
+#   %ecx      = pointer to the 10-limb destination
+#   76(%esp)  = the 32-bit multiplier (first stack slot above the frame)
+# Returns the destination pointer in %eax.
+# Strategy: nine one-limb `mull`s (each yields eax:edx); the low halves are
+# written and the high halves folded into the next limb with an adcl chain,
+# producing a 10-limb result.  Compiler-generated (LLVM).
+# -----------------------------------------------------------------------------
+ .align 16, 0x90
+ .type .LmulPv288x32,@function
+.LmulPv288x32: # @mulPv288x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl %edx, %esi # esi = source limbs
+ movl 76(%esp), %edi # edi = 32-bit multiplier
+ movl %edi, %eax
+ mull 32(%esi) # limb 8 product (computed high-to-low)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 28(%esi)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 24(%esi)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 20(%esi)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 16(%esi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 12(%esi)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 8(%esi)
+ movl %edx, %ebx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 4(%esi)
+ movl %edx, %ebp
+ movl %eax, (%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull (%esi) # limb 0 product
+ movl %eax, (%ecx) # dst[0] = low(p0)
+ addl (%esp), %edx # 4-byte Folded Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload; carry chain folds high halves
+ movl %ebp, 8(%ecx)
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl $0, %eax # top limb: final carry only
+ movl %eax, 36(%ecx)
+ movl %ecx, %eax # return the destination pointer
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end126:
+ .size .LmulPv288x32, .Lfunc_end126-.LmulPv288x32
+
+# -----------------------------------------------------------------------------
+# mcl_fp_mulUnitPre9L(z, x, u) -- 9-limb x 32-bit multiply, i386 cdecl.
+# Stack args after the 4 pushes + subl $76 frame: 96(%esp)=z (10 limbs),
+# 100(%esp)=x (9 limbs), 104(%esp)=u (32-bit multiplier).
+# Sets up PIC (%ebx = GOT via the call/pop idiom), invokes the local helper
+# .LmulPv288x32 into a 40-byte stack buffer at 32(%esp), then copies the
+# 10-limb product out to z.
+# -----------------------------------------------------------------------------
+ .globl mcl_fp_mulUnitPre9L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre9L,@function
+mcl_fp_mulUnitPre9L: # @mcl_fp_mulUnitPre9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $76, %esp
+ calll .L127$pb # call/pop idiom: load EIP for PIC
+.L127$pb:
+ popl %ebx
+.Ltmp8:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp8-.L127$pb), %ebx # ebx = GOT base
+ movl 104(%esp), %eax # scalar -> helper's stack slot
+ movl %eax, (%esp)
+ leal 32(%esp), %ecx # ecx = destination buffer
+ movl 100(%esp), %edx # edx = source limbs
+ calll .LmulPv288x32
+ movl 68(%esp), %eax # copy the 10 result limbs out
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %edi
+ movl 48(%esp), %ebx
+ movl 44(%esp), %ebp
+ movl 40(%esp), %esi
+ movl 32(%esp), %edx
+ movl 36(%esp), %ecx
+ movl 96(%esp), %eax # eax = z
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %ebp, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %edi, 20(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ addl $76, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end127:
+ .size mcl_fp_mulUnitPre9L, .Lfunc_end127-mcl_fp_mulUnitPre9L
+
+# -----------------------------------------------------------------------------
+# mcl_fpDbl_mulPre9L(z, x, y) -- 9-limb x 9-limb schoolbook multiplication
+# producing an 18-limb product.  i386 cdecl; stack args after the 4 pushes +
+# subl $428 frame: 448(%esp)=z (18 limbs), 452(%esp)=x, 456(%esp)=y.
+# Strategy: for each multiplier limb y[k] (k = 0..8), call .LmulPv288x32
+# (x * y[k] -> 10 limbs in a stack buffer), emit the lowest limb of the
+# running sum to z[k], and fold the remaining limbs into the accumulator
+# held in spill slots with an adcl chain.  %ebx carries the GOT base that
+# .LmulPv288x32's PIC environment expects; it is re-loaded from the spill
+# at 60(%esp) before every call.  Compiler-generated (LLVM) -- the spill-slot
+# rotation differs pass to pass; do not reorder.
+# -----------------------------------------------------------------------------
+ .globl mcl_fpDbl_mulPre9L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre9L,@function
+mcl_fpDbl_mulPre9L: # @mcl_fpDbl_mulPre9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $428, %esp # imm = 0x1AC
+ calll .L128$pb # call/pop idiom: load EIP for PIC
+.L128$pb:
+ popl %esi
+.Ltmp9:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp9-.L128$pb), %esi
+ movl %esi, 60(%esp) # 4-byte Spill; GOT base, reloaded per call
+# -- pass 0: x * y[0] -> initializes the accumulator, z[0] emitted
+ movl 456(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 452(%esp), %edx
+ movl %edx, %ebp
+ movl %esi, %ebx
+ calll .LmulPv288x32
+ movl 420(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 396(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 392(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl 388(%esp), %edi
+ movl 448(%esp), %ecx
+ movl %eax, (%ecx)
+# -- pass 1: accumulate x * y[1], z[1] emitted
+ movl 456(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl %ebp, %edx
+ movl %esi, %ebx
+ calll .LmulPv288x32
+ addl 344(%esp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 364(%esp), %ebx
+ movl 360(%esp), %edi
+ movl 356(%esp), %esi
+ movl 348(%esp), %ecx
+ movl 352(%esp), %edx
+ movl 448(%esp), %eax
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+# -- pass 2: accumulate x * y[2], z[2] emitted
+ movl 456(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 304(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 340(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 336(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 324(%esp), %edi
+ movl 320(%esp), %ebp
+ movl 316(%esp), %esi
+ movl 308(%esp), %ecx
+ movl 312(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+# -- pass 3: accumulate x * y[3], z[3] emitted
+ movl 456(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 264(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 300(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 296(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 292(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 288(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 284(%esp), %ebx
+ movl 280(%esp), %edi
+ movl 276(%esp), %esi
+ movl 268(%esp), %ecx
+ movl 272(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+# -- pass 4: accumulate x * y[4], z[4] emitted
+ movl 456(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ addl 224(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 248(%esp), %ebx
+ movl 244(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 240(%esp), %edi
+ movl 236(%esp), %ebp
+ movl 228(%esp), %ecx
+ movl 232(%esp), %edx
+ movl 448(%esp), %eax
+ movl 44(%esp), %esi # 4-byte Reload
+ movl %esi, 16(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+# -- pass 5: accumulate x * y[5], z[5] emitted
+ movl 456(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 44(%esp), %eax # 4-byte Reload
+ addl 184(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 220(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 204(%esp), %edi
+ movl 200(%esp), %ebx
+ movl 196(%esp), %esi
+ movl 188(%esp), %ecx
+ movl 192(%esp), %edx
+ movl 448(%esp), %eax
+ movl 44(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 20(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+# -- pass 6: accumulate x * y[6], z[6] emitted
+ movl 456(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ addl 144(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 164(%esp), %ebx
+ movl 160(%esp), %edi
+ movl 156(%esp), %esi
+ movl 148(%esp), %ecx
+ movl 152(%esp), %edx
+ movl 448(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+# -- pass 7: accumulate x * y[7], z[7] emitted
+ movl 456(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 12(%esp), %esi # 4-byte Reload
+ addl 104(%esp), %esi
+ movl 140(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 136(%esp), %ebp
+ movl 132(%esp), %edi
+ movl 128(%esp), %ebx
+ movl 124(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 120(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 116(%esp), %edx
+ movl 108(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx
+ movl 448(%esp), %eax
+ movl %esi, 28(%eax)
+ movl 12(%esp), %esi # 4-byte Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+# -- pass 8: accumulate x * y[8]; z[8..17] written below
+ movl 456(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 452(%esp), %edx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl %esi, %ebp
+ addl 64(%esp), %ebp
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 88(%esp), %edi
+ movl 84(%esp), %ebx
+ movl 80(%esp), %esi
+ movl 76(%esp), %eax
+ movl 448(%esp), %ecx
+ movl %ebp, 32(%ecx)
+ movl %edx, 36(%ecx)
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl 52(%esp), %edx # 4-byte Reload
+ movl %edx, 40(%ecx)
+ adcl 48(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 52(%ecx)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 56(%ecx)
+ movl %eax, 60(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl $0, %eax # top limb of the 18-limb product
+ movl %eax, 68(%ecx)
+ addl $428, %esp # imm = 0x1AC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end128:
+ .size mcl_fpDbl_mulPre9L, .Lfunc_end128-mcl_fpDbl_mulPre9L
+
+ .globl mcl_fpDbl_sqrPre9L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre9L,@function
+mcl_fpDbl_sqrPre9L: # @mcl_fpDbl_sqrPre9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $428, %esp # imm = 0x1AC
+ calll .L129$pb
+.L129$pb:
+ popl %ebx
+.Ltmp10:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp10-.L129$pb), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 452(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl %edx, %esi
+ movl %ebx, %edi
+ calll .LmulPv288x32
+ movl 420(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 396(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 392(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl 388(%esp), %ebp
+ movl 448(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 4(%esi), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl %esi, %edx
+ movl %edi, %ebx
+ calll .LmulPv288x32
+ addl 344(%esp), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 364(%esp), %ebx
+ movl 360(%esp), %edi
+ movl 356(%esp), %esi
+ movl 348(%esp), %ecx
+ movl 352(%esp), %edx
+ movl 448(%esp), %eax
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl 24(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 304(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 340(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 336(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 324(%esp), %edi
+ movl 320(%esp), %ebp
+ movl 316(%esp), %esi
+ movl 308(%esp), %ecx
+ movl 312(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 264(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 300(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 296(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 292(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 288(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 284(%esp), %ebx
+ movl 280(%esp), %edi
+ movl 276(%esp), %esi
+ movl 268(%esp), %ecx
+ movl 272(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 20(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ addl 224(%esp), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 248(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 244(%esp), %edi
+ movl 240(%esp), %ebp
+ movl 236(%esp), %esi
+ movl 228(%esp), %ecx
+ movl 232(%esp), %edx
+ movl 448(%esp), %eax
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 184(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 220(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 204(%esp), %ebx
+ movl 200(%esp), %edi
+ movl 196(%esp), %esi
+ movl 188(%esp), %ecx
+ movl 192(%esp), %edx
+ movl 448(%esp), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ addl 144(%esp), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 164(%esp), %edi
+ movl 160(%esp), %ebp
+ movl 156(%esp), %esi
+ movl 148(%esp), %ecx
+ movl 152(%esp), %edx
+ movl 448(%esp), %eax
+ movl 24(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl 4(%esp), %esi # 4-byte Reload
+ addl 104(%esp), %esi
+ movl 140(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 136(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 132(%esp), %ebp
+ movl 128(%esp), %ebx
+ movl 124(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 120(%esp), %edi
+ movl 116(%esp), %edx
+ movl 108(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx
+ movl 448(%esp), %eax
+ movl %esi, 28(%eax)
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 452(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 60(%esp), %ebx # 4-byte Reload
+ calll .LmulPv288x32
+ movl %esi, %ebp
+ addl 64(%esp), %ebp
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 88(%esp), %edi
+ movl 84(%esp), %ebx
+ movl 80(%esp), %esi
+ movl 76(%esp), %eax
+ movl 448(%esp), %ecx
+ movl %ebp, 32(%ecx)
+ movl %edx, 36(%ecx)
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ movl %edx, 40(%ecx)
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 52(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 56(%ecx)
+ movl %eax, 60(%ecx)
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 68(%ecx)
+ addl $428, %esp # imm = 0x1AC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end129:
+ .size mcl_fpDbl_sqrPre9L, .Lfunc_end129-mcl_fpDbl_sqrPre9L
+
+ #-------------------------------------------------------------------------
+ # mcl_fp_mont9L -- 9-limb (288-bit) Montgomery multiplication, i386/ELF.
+ # Compiler-generated (LLVM) code in AT&T syntax; do not hand-tune --
+ # regenerate from the mcl sources instead.
+ # Stack args after the 4 pushes + subl $796:
+ #   816(%esp)=z (result), 820(%esp)=x, 824(%esp)=y, 828(%esp)=p (modulus).
+ # -4(p) is loaded and used as the multiplier for the reduction step below,
+ # i.e. the Montgomery constant n0 = -p^{-1} mod 2^32 (standard mcl layout
+ # -- confirm against mcl's FpGenerator if in doubt).
+ # Per round i: t += x*y[i] (via .LmulPv288x32); m = t0*n0 mod 2^32;
+ # t = (t + m*p) >> 32 (second .LmulPv288x32 call).  After 9 rounds a
+ # conditional subtraction of p brings the result into [0, p).
+ #-------------------------------------------------------------------------
+ .globl mcl_fp_mont9L
+ .align 16, 0x90
+ .type mcl_fp_mont9L,@function
+mcl_fp_mont9L: # @mcl_fp_mont9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $796, %esp # imm = 0x31C
+ # Materialize the GOT pointer in %ebx (PIC); kept live for the
+ # .LmulPv288x32 helper calls.
+ calll .L130$pb
+.L130$pb:
+ popl %ebx
+.Ltmp11:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp11-.L130$pb), %ebx
+ movl 828(%esp), %eax
+ movl -4(%eax), %edi # edi = n0 (reduction constant), spilled below
+ movl %edi, 28(%esp) # 4-byte Spill
+ # Round 0: t = x*y[0] into 752..788(%esp), then m = t0*n0.
+ movl 824(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 752(%esp), %ebp
+ movl 756(%esp), %esi
+ movl %ebp, %eax
+ imull %edi, %eax
+ movl 788(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 784(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 780(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 776(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 772(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 768(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 764(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 760(%esp), %edi
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 712(%esp), %ebp
+ adcl 716(%esp), %esi
+ adcl 720(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 748(%esp), %ebp
+ sbbl %eax, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ # Round 1: t += x*y[1].
+ movl 824(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 64(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 672(%esp), %esi
+ adcl 676(%esp), %edi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 680(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 684(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 688(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 696(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 704(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 708(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ andl $1, %ebp
+ addl 632(%esp), %esi
+ adcl 636(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 660(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ # Round 2: t += x*y[2].
+ movl 824(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ addl 592(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 616(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 624(%esp), %esi
+ adcl 628(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ andl $1, %ebp
+ addl 552(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 580(%esp), %edi
+ adcl 584(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ # Round 3: t += x*y[3].
+ movl 824(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 512(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 512(%esp), %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 524(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 536(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 544(%esp), %edi
+ adcl 548(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl 40(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 472(%esp), %ebp
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 484(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 488(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 492(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 496(%esp), %ebp
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 500(%esp), %esi
+ adcl 504(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 508(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ # Round 4: t += x*y[4].
+ movl 824(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 820(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ movl 32(%esp), %ecx # 4-byte Reload
+ addl 432(%esp), %ecx
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 444(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 452(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 456(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 392(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl %esi, %eax
+ andl $1, %eax
+ addl 392(%esp), %ebp
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 396(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 400(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 404(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 408(%esp), %ebp
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 412(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 416(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 420(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 424(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 428(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ # Round 5: t += x*y[5].
+ movl 824(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ addl 352(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 364(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 372(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 384(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 312(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 316(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 320(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 324(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 328(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 332(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 336(%esp), %esi
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 340(%esp), %edi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 344(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 348(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %ebp
+ # Round 6: t += x*y[6].
+ movl 824(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 272(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 292(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 296(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 308(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 232(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 240(%esp), %esi
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 244(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 260(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ # Round 7: t += x*y[7].
+ movl 824(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 192(%esp), %ecx
+ adcl 196(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 200(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 212(%esp), %esi
+ adcl 216(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 152(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ andl $1, %ebp
+ addl 152(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 164(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 172(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 180(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ # Round 8 (last): t += x*y[8].
+ movl 824(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 112(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 120(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 128(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 132(%esp), %ebp
+ adcl 136(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ # Final reduction step: m = t0*n0; t = (t + m*p) >> 32.
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 72(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ andl $1, %edi
+ addl 72(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 92(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %edi
+ # Conditional subtraction: compute t - p limb by limb; the borrow
+ # (in %ebx/%edi below) selects between t and t - p when storing.
+ movl 828(%esp), %ebx
+ subl (%ebx), %eax
+ movl %ecx, %edx
+ sbbl 4(%ebx), %edx
+ movl %esi, %ecx
+ sbbl 8(%ebx), %ecx
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 12(%ebx), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 20(%ebx), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 24(%ebx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ sbbl 28(%ebx), %esi
+ movl 60(%esp), %ebp # 4-byte Reload
+ sbbl 32(%ebx), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ sbbl $0, %edi
+ andl $1, %edi
+ movl %edi, %ebx
+ jne .LBB130_2
+# BB#1:
+ movl %esi, 32(%esp) # 4-byte Spill
+.LBB130_2:
+ testb %bl, %bl
+ movl 68(%esp), %esi # 4-byte Reload
+ jne .LBB130_4
+# BB#3:
+ movl %eax, %esi
+.LBB130_4:
+ # Store the selected 9-limb result to z = 816(%esp).
+ movl 816(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 64(%esp), %eax # 4-byte Reload
+ jne .LBB130_6
+# BB#5:
+ movl %edx, %eax
+.LBB130_6:
+ movl %eax, 4(%ebp)
+ movl 52(%esp), %eax # 4-byte Reload
+ jne .LBB130_8
+# BB#7:
+ movl %ecx, %eax
+.LBB130_8:
+ movl %eax, 8(%ebp)
+ movl 44(%esp), %eax # 4-byte Reload
+ jne .LBB130_10
+# BB#9:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB130_10:
+ movl %eax, 12(%ebp)
+ jne .LBB130_12
+# BB#11:
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+.LBB130_12:
+ movl 40(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%ebp)
+ movl 36(%esp), %eax # 4-byte Reload
+ jne .LBB130_14
+# BB#13:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB130_14:
+ movl %eax, 20(%ebp)
+ movl 48(%esp), %eax # 4-byte Reload
+ jne .LBB130_16
+# BB#15:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB130_16:
+ movl %eax, 24(%ebp)
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%ebp)
+ movl 60(%esp), %eax # 4-byte Reload
+ jne .LBB130_18
+# BB#17:
+ movl 56(%esp), %eax # 4-byte Reload
+.LBB130_18:
+ movl %eax, 32(%ebp)
+ # Epilogue: release frame, restore callee-saved registers.
+ addl $796, %esp # imm = 0x31C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end130:
+ .size mcl_fp_mont9L, .Lfunc_end130-mcl_fp_mont9L
+
+ .globl mcl_fp_montNF9L
+ .align 16, 0x90
+ .type mcl_fp_montNF9L,@function
+mcl_fp_montNF9L: # @mcl_fp_montNF9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $796, %esp # imm = 0x31C
+ calll .L131$pb
+.L131$pb:
+ popl %ebx
+.Ltmp12:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp12-.L131$pb), %ebx
+ movl 828(%esp), %eax
+ movl -4(%eax), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 752(%esp), %esi
+ movl 756(%esp), %ebp
+ movl %esi, %eax
+ imull %edi, %eax
+ movl 788(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 784(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 780(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 776(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 772(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 768(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 764(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 760(%esp), %edi
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 712(%esp), %esi
+ adcl 716(%esp), %ebp
+ adcl 720(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 740(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 708(%esp), %eax
+ addl 672(%esp), %ebp
+ adcl 676(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 680(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 684(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 688(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 696(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 704(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 632(%esp), %ebp
+ adcl 636(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 656(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 664(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 628(%esp), %eax
+ addl 592(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 596(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 600(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 604(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 608(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 612(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 616(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 620(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 624(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ addl 552(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 572(%esp), %esi
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 576(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 588(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 512(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 548(%esp), %eax
+ movl 32(%esp), %edx # 4-byte Reload
+ addl 512(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 516(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 520(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 524(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 528(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 532(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 536(%esp), %ebp
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 540(%esp), %edi
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 544(%esp), %esi
+ adcl $0, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %edx, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ movl 32(%esp), %eax # 4-byte Reload
+ addl 472(%esp), %eax
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 496(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl 500(%esp), %edi
+ movl %edi, %ebp
+ adcl 504(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 820(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ movl 468(%esp), %eax
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 432(%esp), %ecx
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 436(%esp), %esi
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 440(%esp), %edi
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 444(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 448(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 452(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 456(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 460(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 464(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 392(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 392(%esp), %ebp
+ adcl 396(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 400(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 412(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 424(%esp), %edi
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 428(%esp), %esi
+ movl 824(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 388(%esp), %eax
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 352(%esp), %ecx
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 356(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 360(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 364(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 368(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 372(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 376(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 380(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 384(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 312(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 324(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 340(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 348(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 308(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 272(%esp), %ecx
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 280(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 292(%esp), %ebp
+ adcl 296(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 232(%esp), %edi
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 236(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ adcl 252(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 228(%esp), %ebp
+ movl %esi, %ecx
+ addl 192(%esp), %ecx
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 196(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 208(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 152(%esp), %ecx
+ movl 828(%esp), %edx
+ calll .LmulPv288x32
+ addl 152(%esp), %edi
+ adcl 156(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 160(%esp), %edi
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 164(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 188(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 820(%esp), %edx
+ calll .LmulPv288x32
+ movl 148(%esp), %ebp
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 112(%esp), %ecx
+ adcl 116(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 120(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 128(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 132(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 72(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ addl 72(%esp), %edi
+ movl 44(%esp), %edi # 4-byte Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl 80(%esp), %ebx
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 84(%esp), %edi
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 92(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 828(%esp), %eax
+ subl (%eax), %edx
+ sbbl 4(%eax), %ebx
+ movl %edi, %ecx
+ sbbl 8(%eax), %ecx
+ movl 52(%esp), %esi # 4-byte Reload
+ sbbl 12(%eax), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ sbbl 16(%eax), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 20(%eax), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 24(%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 28(%eax), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ sbbl 32(%eax), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ js .LBB131_2
+# BB#1:
+ movl %edx, %eax
+.LBB131_2:
+ movl 816(%esp), %edx
+ movl %eax, (%edx)
+ movl 64(%esp), %esi # 4-byte Reload
+ js .LBB131_4
+# BB#3:
+ movl %ebx, %esi
+.LBB131_4:
+ movl %esi, 4(%edx)
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB131_6
+# BB#5:
+ movl %ecx, %edi
+.LBB131_6:
+ movl %edi, 8(%edx)
+ js .LBB131_8
+# BB#7:
+ movl 16(%esp), %ebp # 4-byte Reload
+.LBB131_8:
+ movl %ebp, 12(%edx)
+ js .LBB131_10
+# BB#9:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB131_10:
+ movl %eax, 16(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB131_12
+# BB#11:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB131_12:
+ movl %eax, 20(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB131_14
+# BB#13:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB131_14:
+ movl %eax, 24(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB131_16
+# BB#15:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB131_16:
+ movl %eax, 28(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB131_18
+# BB#17:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB131_18:
+ movl %eax, 32(%edx)
+ addl $796, %esp # imm = 0x31C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end131:
+ .size mcl_fp_montNF9L, .Lfunc_end131-mcl_fp_montNF9L
+
+# ----------------------------------------------------------------------------
+# mcl_fp_montRed9L — Montgomery reduction for 9-limb (288-bit) operands.
+# i386 cdecl. After the prologue (4 pushes + subl $492) the incoming args are:
+#   512(%esp) = z   (out: 9-limb reduced result)
+#   516(%esp) = xy  (in: 18-limb value to reduce; limbs loaded at offsets 0..68)
+#   520(%esp) = p   (in: 9-limb modulus; -4(p) is a precomputed per-modulus
+#                    constant — presumably -p^{-1} mod 2^32, the standard
+#                    Montgomery n' — TODO confirm against mcl's table layout)
+# Each round computes q = limb * n' (imull), calls .LmulPv288x32 to form q*p,
+# and folds the 10-limb product into the accumulator with an adcl chain,
+# shifting one limb out per round (9 rounds total). A final conditional
+# subtraction of p produces the fully reduced result.
+# Compiler-generated (LLVM): PIC base in %ebx, heavy 4-byte stack spills.
+# ----------------------------------------------------------------------------
+ .globl mcl_fp_montRed9L
+ .align 16, 0x90
+ .type mcl_fp_montRed9L,@function
+mcl_fp_montRed9L: # @mcl_fp_montRed9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $492, %esp # imm = 0x1EC
+ calll .L132$pb
+.L132$pb:
+ popl %ebx # %ebx = PIC base (GOT pointer) for .LmulPv288x32 calls
+.Ltmp13:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp13-.L132$pb), %ebx
+ movl 520(%esp), %edx
+ movl -4(%edx), %edi # load Montgomery constant n' from p[-1]
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 516(%esp), %eax
+ movl (%eax), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 4(%eax), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ imull %edi, %ecx # q0 = xy[0] * n' (mod 2^32)
+ movl 68(%eax), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 64(%eax), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 60(%eax), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 56(%eax), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 52(%eax), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 48(%eax), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 44(%eax), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 40(%eax), %edi
+ movl %edi, 124(%esp) # 4-byte Spill
+ movl 36(%eax), %edi
+ movl %edi, 120(%esp) # 4-byte Spill
+ movl 32(%eax), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 28(%eax), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 24(%eax), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 20(%eax), %ebp
+ movl 16(%eax), %edi
+ movl 12(%eax), %esi
+ movl 8(%eax), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl (%edx), %eax # cache the 9 modulus limbs p[0..8] on the stack
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 20(%edx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 16(%edx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 12(%edx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 8(%edx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 4(%edx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+# Round 1: fold q0*p into the accumulator; xy[0] is annihilated.
+ movl %ecx, (%esp)
+ leal 448(%esp), %ecx
+ calll .LmulPv288x32
+ movl 76(%esp), %eax # 4-byte Reload
+ addl 448(%esp), %eax
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 452(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 460(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 464(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 468(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ sbbl %eax, %eax # capture overall carry of the high half
+ movl %eax, 76(%esp) # 4-byte Spill
+# Round 2
+ movl %ecx, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 408(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ movl 76(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 408(%esp), %ecx
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 412(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 416(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 420(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 424(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 428(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 432(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 436(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 440(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ adcl $0, %esi
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+# Round 3
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ addl 368(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 372(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 404(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+# Round 4
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ addl 328(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 332(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 364(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+# Round 5
+ movl %ecx, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 288(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ movl 64(%esp), %eax # 4-byte Reload
+ addl 288(%esp), %eax
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 292(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+# Round 6
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 520(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ addl 248(%esp), %esi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 252(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 264(%esp), %ebp
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %edi, %esi
+ adcl $0, %esi
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+# Round 7
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ addl 208(%esp), %edi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 212(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 220(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+# Round 8
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 520(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv288x32
+ addl 168(%esp), %ebp
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 172(%esp), %ecx
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %ebp # 4-byte Reload
+ adcl 180(%esp), %ebp
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 184(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+# Round 9 (final)
+ movl 72(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 520(%esp), %edx
+ calll .LmulPv288x32
+ addl 128(%esp), %edi
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl %eax, %edi
+ adcl 136(%esp), %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ adcl 140(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 144(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 164(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+# Trial subtraction: t = acc - p (limbwise sbbl chain); %edx becomes the
+# final borrow flag used to select acc (borrow) or t (no borrow) below.
+ subl 20(%esp), %edi # 4-byte Folded Reload
+ movl 124(%esp), %eax # 4-byte Reload
+ sbbl 16(%esp), %eax # 4-byte Folded Reload
+ sbbl 24(%esp), %esi # 4-byte Folded Reload
+ sbbl 28(%esp), %ecx # 4-byte Folded Reload
+ sbbl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %ebx # 4-byte Reload
+ sbbl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ebx # 4-byte Reload
+ sbbl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 92(%esp) # 4-byte Spill
+ movl %edx, %ebx
+ movl %ebp, %edx
+ sbbl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 104(%esp) # 4-byte Spill
+ sbbl $0, %edx
+ andl $1, %edx # %edx = 1 if acc < p (keep acc), 0 if t is the result
+# Limb-by-limb select of reduced/unreduced value into z (512(%esp)).
+ jne .LBB132_2
+# BB#1:
+ movl %ecx, 116(%esp) # 4-byte Spill
+.LBB132_2:
+ testb %dl, %dl
+ movl 120(%esp), %ecx # 4-byte Reload
+ jne .LBB132_4
+# BB#3:
+ movl %edi, %ecx
+.LBB132_4:
+ movl 512(%esp), %edi
+ movl %ecx, (%edi)
+ movl 88(%esp), %ecx # 4-byte Reload
+ jne .LBB132_6
+# BB#5:
+ movl %eax, 124(%esp) # 4-byte Spill
+.LBB132_6:
+ movl 124(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%edi)
+ movl 96(%esp), %eax # 4-byte Reload
+ jne .LBB132_8
+# BB#7:
+ movl %esi, %eax
+.LBB132_8:
+ movl %eax, 8(%edi)
+ movl 116(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%edi)
+ movl 80(%esp), %eax # 4-byte Reload
+ movl 108(%esp), %ebp # 4-byte Reload
+ jne .LBB132_10
+# BB#9:
+ movl 72(%esp), %ebp # 4-byte Reload
+.LBB132_10:
+ movl %ebp, 16(%edi)
+ movl 112(%esp), %ebx # 4-byte Reload
+ jne .LBB132_12
+# BB#11:
+ movl 76(%esp), %ebx # 4-byte Reload
+.LBB132_12:
+ movl %ebx, 20(%edi)
+ movl 100(%esp), %esi # 4-byte Reload
+ jne .LBB132_14
+# BB#13:
+ movl 84(%esp), %esi # 4-byte Reload
+.LBB132_14:
+ movl %esi, 24(%edi)
+ jne .LBB132_16
+# BB#15:
+ movl 92(%esp), %ecx # 4-byte Reload
+.LBB132_16:
+ movl %ecx, 28(%edi)
+ jne .LBB132_18
+# BB#17:
+ movl 104(%esp), %eax # 4-byte Reload
+.LBB132_18:
+ movl %eax, 32(%edi)
+ addl $492, %esp # imm = 0x1EC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end132:
+ .size mcl_fp_montRed9L, .Lfunc_end132-mcl_fp_montRed9L
+
+# ----------------------------------------------------------------------------
+# mcl_fp_addPre9L — 9-limb (288-bit) addition WITHOUT modular reduction.
+# i386 cdecl. After the prologue (3 pushes) the incoming args are:
+#   16(%esp) = z (out: 9 limbs, z = x + y)
+#   20(%esp) = x (in: 9 limbs)
+#   24(%esp) = y (in: 9 limbs)
+# Returns the final carry (0 or 1) in %eax.
+# Single addl/adcl carry chain — no flag-clobbering instruction may be
+# inserted between the adds (movl preserves flags).
+# ----------------------------------------------------------------------------
+ .globl mcl_fp_addPre9L
+ .align 16, 0x90
+ .type mcl_fp_addPre9L,@function
+mcl_fp_addPre9L: # @mcl_fp_addPre9L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax # %eax = y
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx # %ecx = x
+ addl (%ecx), %edx # limb 0 starts the carry chain
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx # %ebx = z (result)
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl %esi, 24(%ebx)
+ movl %edx, 28(%ebx)
+ movl 32(%eax), %eax
+ movl 32(%ecx), %ecx
+ adcl %eax, %ecx # limb 8 — final carry left in CF
+ movl %ecx, 32(%ebx)
+ sbbl %eax, %eax # %eax = 0 - CF = -carry
+ andl $1, %eax # return carry as 0/1
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end133:
+ .size mcl_fp_addPre9L, .Lfunc_end133-mcl_fp_addPre9L
+
+# ----------------------------------------------------------------------------
+# mcl_fp_subPre9L — 9-limb (288-bit) subtraction WITHOUT modular reduction.
+# i386 cdecl. After the prologue (4 pushes) the incoming args are:
+#   20(%esp) = z (out: 9 limbs, z = x - y)
+#   24(%esp) = x (in: 9 limbs, minuend)
+#   28(%esp) = y (in: 9 limbs, subtrahend)
+# Returns the final borrow (0 or 1) in %eax.
+# Single subl/sbbl borrow chain; %eax is pre-zeroed (xorl does clobber
+# flags, so it is done before the chain starts).
+# ----------------------------------------------------------------------------
+ .globl mcl_fp_subPre9L
+ .align 16, 0x90
+ .type mcl_fp_subPre9L,@function
+mcl_fp_subPre9L: # @mcl_fp_subPre9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx # %ecx = x
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax # %eax = 0 before the borrow chain
+ movl 28(%esp), %edx # %edx = y
+ subl (%edx), %esi # limb 0 starts the borrow chain
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp # %ebp = z (result)
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl %edi, 24(%ebp)
+ movl %esi, 28(%ebp)
+ movl 32(%edx), %edx
+ movl 32(%ecx), %ecx
+ sbbl %edx, %ecx # limb 8 — final borrow left in CF
+ movl %ecx, 32(%ebp)
+ sbbl $0, %eax # %eax = -borrow
+ andl $1, %eax # return borrow as 0/1
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end134:
+ .size mcl_fp_subPre9L, .Lfunc_end134-mcl_fp_subPre9L
+
+# ----------------------------------------------------------------------------
+# mcl_fp_shr1_9L — logical right shift by 1 of a 9-limb (288-bit) value.
+# i386 cdecl. After the prologue (1 push) the incoming args are:
+#   8(%esp)  = z (out: 9 limbs, z = x >> 1)
+#   12(%esp) = x (in: 9 limbs)
+# Each limb is produced with shrdl, which shifts in the low bit of the
+# next-higher limb; the top limb uses a plain shrl (zero fill).
+# ----------------------------------------------------------------------------
+ .globl mcl_fp_shr1_9L
+ .align 16, 0x90
+ .type mcl_fp_shr1_9L,@function
+mcl_fp_shr1_9L: # @mcl_fp_shr1_9L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax # %eax = x
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ shrdl $1, %edx, %ecx # z[0] = (x[1]:x[0]) >> 1
+ movl 8(%esp), %esi # %esi = z (result)
+ movl %ecx, (%esi)
+ movl 8(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 4(%esi)
+ movl 12(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 8(%esi)
+ movl 16(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%esi)
+ movl 20(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 16(%esi)
+ movl 24(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 20(%esi)
+ movl 28(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 24(%esi)
+ movl 32(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 28(%esi)
+ shrl %eax # top limb: shift in zero
+ movl %eax, 32(%esi)
+ popl %esi
+ retl
+.Lfunc_end135:
+ .size mcl_fp_shr1_9L, .Lfunc_end135-mcl_fp_shr1_9L
+
+# ----------------------------------------------------------------------------
+# mcl_fp_add9L — 9-limb (288-bit) modular addition: z = x + y (mod p).
+# i386 cdecl. After the prologue (4 pushes + subl $20) the incoming args are:
+#   40(%esp) = z (out: 9 limbs)
+#   44(%esp) = x (in: 9 limbs)
+#   48(%esp) = y (in: 9 limbs)
+#   52(%esp) = p (in: 9-limb modulus)
+# Strategy: compute s = x + y and store it to z unconditionally, then compute
+# s - p (the add's carry folded into %ebx and the borrow chain). If the
+# combined carry/borrow test shows s >= p, the reduced value s - p overwrites
+# z in the %nocarry block; otherwise the unreduced s already in z stands.
+# NOTE(review): correctness presumes inputs are already < p — standard for
+# these fp_add routines, but not checked here; confirm against callers.
+# ----------------------------------------------------------------------------
+ .globl mcl_fp_add9L
+ .align 16, 0x90
+ .type mcl_fp_add9L,@function
+mcl_fp_add9L: # @mcl_fp_add9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $20, %esp
+ movl 48(%esp), %edi # %edi = y
+ movl (%edi), %ecx
+ movl 4(%edi), %eax
+ movl 44(%esp), %ebx # %ebx = x
+ addl (%ebx), %ecx # limb 0 starts the carry chain
+ movl %ecx, %ebp
+ adcl 4(%ebx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 8(%edi), %eax
+ adcl 8(%ebx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 12(%ebx), %ecx
+ movl 16(%ebx), %eax
+ adcl 12(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 16(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 20(%ebx), %esi
+ adcl 20(%edi), %esi
+ movl 24(%ebx), %edx
+ adcl 24(%edi), %edx
+ movl 28(%ebx), %ecx
+ adcl 28(%edi), %ecx
+ movl 32(%ebx), %eax
+ adcl 32(%edi), %eax
+ movl 40(%esp), %edi # %edi = z; store unreduced sum s
+ movl %ebp, (%edi)
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%edi)
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%edi)
+ movl 8(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%edi)
+ movl 4(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%edi)
+ movl %esi, 20(%edi)
+ movl %edx, 24(%edi)
+ movl %ecx, 28(%edi)
+ movl %eax, 32(%edi)
+ sbbl %ebx, %ebx # %ebx = -(carry out of the addition)
+ andl $1, %ebx
+ movl 52(%esp), %edi # %edi = p; trial subtraction s - p
+ subl (%edi), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ movl 16(%esp), %ebp # 4-byte Reload
+ sbbl 4(%edi), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ebp # 4-byte Reload
+ sbbl 8(%edi), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %ebp # 4-byte Reload
+ sbbl 12(%edi), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 4(%esp), %ebp # 4-byte Reload
+ sbbl 16(%edi), %ebp
+ sbbl 20(%edi), %esi
+ sbbl 24(%edi), %edx
+ sbbl 28(%edi), %ecx
+ sbbl 32(%edi), %eax
+ sbbl $0, %ebx # fold borrow into the saved add-carry
+ testb $1, %bl
+ jne .LBB136_2 # borrow and no add-carry => s < p: keep s in z
+# BB#1: # %nocarry
+ movl (%esp), %edi # 4-byte Reload
+ movl 40(%esp), %ebx
+ movl %edi, (%ebx) # overwrite z with the reduced value s - p
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 8(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl %ebp, 16(%ebx)
+ movl %esi, 20(%ebx)
+ movl %edx, 24(%ebx)
+ movl %ecx, 28(%ebx)
+ movl %eax, 32(%ebx)
+.LBB136_2: # %carry
+ addl $20, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end136:
+ .size mcl_fp_add9L, .Lfunc_end136-mcl_fp_add9L
+
+ .globl mcl_fp_addNF9L
+ .align 16, 0x90
+ .type mcl_fp_addNF9L,@function
+mcl_fp_addNF9L: # @mcl_fp_addNF9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 100(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edi
+ movl 96(%esp), %esi
+ addl (%esi), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 4(%esi), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 32(%eax), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 28(%eax), %ebp
+ movl 24(%eax), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 20(%eax), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 16(%eax), %ebx
+ movl 12(%eax), %edx
+ movl 8(%eax), %ecx
+ adcl 8(%esi), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ adcl 12(%esi), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 16(%esi), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 20(%esi), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 24(%esi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 28(%esi), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 32(%esi), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 104(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, %ebp
+ subl (%esi), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ sbbl 4(%esi), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ sbbl 8(%esi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 12(%esi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ sbbl 16(%esi), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %ebx # 4-byte Reload
+ sbbl 20(%esi), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ sbbl 24(%esi), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ sbbl 28(%esi), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edx
+ movl %ecx, %ebp
+ sbbl 32(%esi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %edx, %esi
+ sarl $31, %esi
+ testl %esi, %esi
+ js .LBB137_2
+# BB#1:
+ movl (%esp), %eax # 4-byte Reload
+.LBB137_2:
+ movl 92(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ js .LBB137_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB137_4:
+ movl %eax, 4(%ecx)
+ movl 68(%esp), %esi # 4-byte Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB137_6
+# BB#5:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB137_6:
+ movl %eax, 8(%ecx)
+ movl %ebp, %eax
+ js .LBB137_8
+# BB#7:
+ movl 12(%esp), %edx # 4-byte Reload
+.LBB137_8:
+ movl %edx, 12(%ecx)
+ movl 56(%esp), %edx # 4-byte Reload
+ js .LBB137_10
+# BB#9:
+ movl 16(%esp), %ebx # 4-byte Reload
+.LBB137_10:
+ movl %ebx, 16(%ecx)
+ js .LBB137_12
+# BB#11:
+ movl 20(%esp), %edi # 4-byte Reload
+.LBB137_12:
+ movl %edi, 20(%ecx)
+ js .LBB137_14
+# BB#13:
+ movl 24(%esp), %esi # 4-byte Reload
+.LBB137_14:
+ movl %esi, 24(%ecx)
+ js .LBB137_16
+# BB#15:
+ movl 28(%esp), %edx # 4-byte Reload
+.LBB137_16:
+ movl %edx, 28(%ecx)
+ js .LBB137_18
+# BB#17:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB137_18:
+ movl %eax, 32(%ecx)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end137:
+ .size mcl_fp_addNF9L, .Lfunc_end137-mcl_fp_addNF9L
+
+ .globl mcl_fp_sub9L
+ .align 16, 0x90
+ .type mcl_fp_sub9L,@function
+mcl_fp_sub9L: # @mcl_fp_sub9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $28, %esp
+ movl 52(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 56(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 16(%esi), %edx
+ sbbl 16(%edi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 20(%esi), %ecx
+ sbbl 20(%edi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 28(%esi), %ebp
+ sbbl 28(%edi), %ebp
+ movl 32(%esi), %esi
+ sbbl 32(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 48(%esp), %ebx
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl %edx, 16(%ebx)
+ movl %ecx, 20(%ebx)
+ movl %eax, 24(%ebx)
+ movl %ebp, 28(%ebx)
+ movl %esi, 32(%ebx)
+ je .LBB138_2
+# BB#1: # %carry
+ movl %esi, %edi
+ movl 60(%esp), %esi
+ movl 12(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl 8(%esi), %ecx
+ movl 12(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl %ecx, 24(%ebx)
+ movl 28(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 28(%ebx)
+ movl 32(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 32(%ebx)
+.LBB138_2: # %nocarry
+ addl $28, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end138:
+ .size mcl_fp_sub9L, .Lfunc_end138-mcl_fp_sub9L
+
+ .globl mcl_fp_subNF9L
+ .align 16, 0x90
+ .type mcl_fp_subNF9L,@function
+mcl_fp_subNF9L: # @mcl_fp_subNF9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $48, %esp
+ movl 72(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %eax
+ movl 76(%esp), %esi
+ subl (%esi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ sbbl 4(%esi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 28(%ecx), %edx
+ movl 24(%ecx), %edi
+ movl 20(%ecx), %ebx
+ movl 16(%ecx), %ebp
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ movl 76(%esp), %esi
+ sbbl 8(%esi), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx
+ sbbl 12(%ecx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ sbbl 16(%ecx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ sbbl 20(%ecx), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ sbbl 24(%ecx), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ sbbl 28(%ecx), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ sbbl 32(%ecx), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %edx, %ecx
+ sarl $31, %ecx
+ movl %ecx, %eax
+ shldl $1, %edx, %eax
+ movl 80(%esp), %ebp
+ movl 12(%ebp), %edx
+ andl %eax, %edx
+ movl %edx, (%esp) # 4-byte Spill
+ movl 4(%ebp), %edi
+ andl %eax, %edi
+ andl (%ebp), %eax
+ movl 32(%ebp), %edx
+ andl %ecx, %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 28(%ebp), %edx
+ andl %ecx, %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ roll %ecx
+ movl 24(%ebp), %ebx
+ andl %ecx, %ebx
+ movl 20(%ebp), %esi
+ andl %ecx, %esi
+ movl 16(%ebp), %edx
+ andl %ecx, %edx
+ andl 8(%ebp), %ecx
+ addl 32(%esp), %eax # 4-byte Folded Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl 68(%esp), %ebp
+ movl %eax, (%ebp)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 4(%ebp)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebp)
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 12(%ebp)
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 16(%ebp)
+ adcl 28(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 20(%ebp)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 24(%ebp)
+ movl %eax, 28(%ebp)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ebp)
+ addl $48, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end139:
+ .size mcl_fp_subNF9L, .Lfunc_end139-mcl_fp_subNF9L
+
+ .globl mcl_fpDbl_add9L
+ .align 16, 0x90
+ .type mcl_fpDbl_add9L,@function
+mcl_fpDbl_add9L: # @mcl_fpDbl_add9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $68, %esp
+ movl 96(%esp), %edx
+ movl 92(%esp), %edi
+ movl 12(%edi), %esi
+ movl 16(%edi), %ecx
+ movl 8(%edx), %ebx
+ movl (%edx), %ebp
+ addl (%edi), %ebp
+ movl 88(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%edx), %ebp
+ adcl 4(%edi), %ebp
+ adcl 8(%edi), %ebx
+ adcl 12(%edx), %esi
+ adcl 16(%edx), %ecx
+ movl %ebp, 4(%eax)
+ movl 44(%edx), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl %ebx, 8(%eax)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%eax)
+ movl 20(%edi), %esi
+ adcl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %ecx, 16(%eax)
+ movl 24(%edi), %ecx
+ adcl %ebx, %ecx
+ movl 28(%edx), %ebx
+ movl %esi, 20(%eax)
+ movl 28(%edi), %esi
+ adcl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %ecx, 24(%eax)
+ movl 32(%edi), %ecx
+ adcl %ebx, %ecx
+ movl 36(%edx), %ebp
+ movl %esi, 28(%eax)
+ movl 36(%edi), %esi
+ adcl %ebp, %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 40(%edx), %esi
+ movl %ecx, 32(%eax)
+ movl 40(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%edi), %eax
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl 48(%edi), %ebx
+ adcl %ecx, %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 52(%edx), %eax
+ movl 52(%edi), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 56(%edx), %esi
+ movl 56(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%edx), %ebp
+ movl 60(%edi), %esi
+ adcl %ebp, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 64(%edx), %eax
+ movl 64(%edi), %ebp
+ adcl %eax, %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 68(%edx), %edx
+ movl 68(%edi), %eax
+ adcl %edx, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ sbbl %edx, %edx
+ andl $1, %edx
+ movl 100(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ subl (%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ sbbl 4(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ sbbl 8(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl 12(%edi), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ sbbl 16(%edi), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 20(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ sbbl 24(%edi), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ movl 32(%esp), %ebp # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ sbbl 32(%edi), %ebx
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB140_2
+# BB#1:
+ movl %ebx, %ebp
+.LBB140_2:
+ testb %dl, %dl
+ movl 60(%esp), %edx # 4-byte Reload
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl 36(%esp), %esi # 4-byte Reload
+ movl 56(%esp), %edi # 4-byte Reload
+ movl 52(%esp), %ebx # 4-byte Reload
+ jne .LBB140_4
+# BB#3:
+ movl (%esp), %ecx # 4-byte Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl 16(%esp), %edx # 4-byte Reload
+.LBB140_4:
+ movl 88(%esp), %eax
+ movl %edx, 36(%eax)
+ movl %ebx, 40(%eax)
+ movl %edi, 44(%eax)
+ movl %esi, 48(%eax)
+ movl %ecx, 52(%eax)
+ movl 44(%esp), %edx # 4-byte Reload
+ movl 64(%esp), %ecx # 4-byte Reload
+ jne .LBB140_6
+# BB#5:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB140_6:
+ movl %ecx, 56(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ jne .LBB140_8
+# BB#7:
+ movl 24(%esp), %edx # 4-byte Reload
+.LBB140_8:
+ movl %edx, 60(%eax)
+ jne .LBB140_10
+# BB#9:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB140_10:
+ movl %ecx, 64(%eax)
+ movl %ebp, 68(%eax)
+ addl $68, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end140:
+ .size mcl_fpDbl_add9L, .Lfunc_end140-mcl_fpDbl_add9L
+
+ .globl mcl_fpDbl_sub9L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub9L,@function
+mcl_fpDbl_sub9L: # @mcl_fpDbl_sub9L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $52, %esp
+ movl 76(%esp), %ebx
+ movl (%ebx), %eax
+ movl 4(%ebx), %edx
+ movl 80(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %edx
+ movl 8(%ebx), %esi
+ sbbl 8(%ebp), %esi
+ movl 72(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%ebx), %eax
+ sbbl 12(%ebp), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%ebx), %edx
+ sbbl 16(%ebp), %edx
+ movl %esi, 8(%ecx)
+ movl 20(%ebp), %esi
+ movl %eax, 12(%ecx)
+ movl 20(%ebx), %eax
+ sbbl %esi, %eax
+ movl 24(%ebp), %esi
+ movl %edx, 16(%ecx)
+ movl 24(%ebx), %edx
+ sbbl %esi, %edx
+ movl 28(%ebp), %esi
+ movl %eax, 20(%ecx)
+ movl 28(%ebx), %eax
+ sbbl %esi, %eax
+ movl 32(%ebp), %esi
+ movl %edx, 24(%ecx)
+ movl 32(%ebx), %edx
+ sbbl %esi, %edx
+ movl 36(%ebp), %esi
+ movl %eax, 28(%ecx)
+ movl 36(%ebx), %eax
+ sbbl %esi, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%ebp), %eax
+ movl %edx, 32(%ecx)
+ movl 40(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 44(%ebp), %eax
+ movl 44(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 48(%ebp), %eax
+ movl 48(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 52(%ebp), %eax
+ movl 52(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 56(%ebp), %eax
+ movl 56(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 60(%ebp), %eax
+ movl 60(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 64(%ebp), %eax
+ movl 64(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 68(%ebp), %eax
+ movl 68(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 84(%esp), %ebp
+ jne .LBB141_1
+# BB#2:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB141_3
+.LBB141_1:
+ movl 32(%ebp), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+.LBB141_3:
+ testb %al, %al
+ jne .LBB141_4
+# BB#5:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ movl $0, %esi
+ jmp .LBB141_6
+.LBB141_4:
+ movl (%ebp), %esi
+ movl 4(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB141_6:
+ jne .LBB141_7
+# BB#8:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB141_9
+.LBB141_7:
+ movl 28(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB141_9:
+ jne .LBB141_10
+# BB#11:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB141_12
+.LBB141_10:
+ movl 24(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB141_12:
+ jne .LBB141_13
+# BB#14:
+ movl $0, %edi
+ jmp .LBB141_15
+.LBB141_13:
+ movl 20(%ebp), %edi
+.LBB141_15:
+ jne .LBB141_16
+# BB#17:
+ movl $0, %ebx
+ jmp .LBB141_18
+.LBB141_16:
+ movl 16(%ebp), %ebx
+.LBB141_18:
+ jne .LBB141_19
+# BB#20:
+ movl %ebp, %eax
+ movl $0, %ebp
+ jmp .LBB141_21
+.LBB141_19:
+ movl %ebp, %eax
+ movl 12(%eax), %ebp
+.LBB141_21:
+ jne .LBB141_22
+# BB#23:
+ xorl %eax, %eax
+ jmp .LBB141_24
+.LBB141_22:
+ movl 8(%eax), %eax
+.LBB141_24:
+ addl 24(%esp), %esi # 4-byte Folded Reload
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 36(%ecx)
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 40(%ecx)
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 48(%ecx)
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 52(%ecx)
+ movl (%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edi, 56(%ecx)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 60(%ecx)
+ movl %eax, 64(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%ecx)
+ addl $52, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end141:
+ .size mcl_fpDbl_sub9L, .Lfunc_end141-mcl_fpDbl_sub9L
+
+ .align 16, 0x90
+ .type .LmulPv320x32,@function
+.LmulPv320x32: # @mulPv320x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $64, %esp
+ movl %edx, %esi
+ movl 84(%esp), %edi
+ movl %edi, %eax
+ mull 36(%esi)
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 32(%esi)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 28(%esi)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 24(%esi)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 20(%esi)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 16(%esi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 12(%esi)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 8(%esi)
+ movl %edx, %ebp
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull 4(%esi)
+ movl %edx, %ebx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %edi, %eax
+ mull (%esi)
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%ecx)
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 40(%ecx)
+ movl %ecx, %eax
+ addl $64, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end142:
+ .size .LmulPv320x32, .Lfunc_end142-.LmulPv320x32
+
+ .globl mcl_fp_mulUnitPre10L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre10L,@function
+mcl_fp_mulUnitPre10L: # @mcl_fp_mulUnitPre10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $76, %esp
+ calll .L143$pb
+.L143$pb:
+ popl %ebx
+.Ltmp14:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp14-.L143$pb), %ebx
+ movl 104(%esp), %eax
+ movl %eax, (%esp)
+ leal 32(%esp), %ecx
+ movl 100(%esp), %edx
+ calll .LmulPv320x32
+ movl 72(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 60(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 56(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx
+ movl 48(%esp), %ebp
+ movl 44(%esp), %edi
+ movl 40(%esp), %esi
+ movl 32(%esp), %edx
+ movl 36(%esp), %ecx
+ movl 96(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebp, 16(%eax)
+ movl %ebx, 20(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ addl $76, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end143:
+ .size mcl_fp_mulUnitPre10L, .Lfunc_end143-mcl_fp_mulUnitPre10L
+
+ .globl mcl_fpDbl_mulPre10L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre10L,@function
+mcl_fpDbl_mulPre10L: # @mcl_fpDbl_mulPre10L
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $188, %esp
+ calll .L144$pb
+.L144$pb:
+ popl %ebx
+.Ltmp15:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp15-.L144$pb), %ebx
+ movl %ebx, -128(%ebp) # 4-byte Spill
+ movl 16(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl 12(%ebp), %esi
+ movl %esi, 4(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre5L@PLT
+ leal 20(%edi), %eax
+ movl %eax, 8(%esp)
+ leal 20(%esi), %eax
+ movl %eax, 4(%esp)
+ movl 8(%ebp), %eax
+ leal 40(%eax), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre5L@PLT
+ movl 28(%esi), %edi
+ movl (%esi), %ebx
+ movl 4(%esi), %eax
+ addl 20(%esi), %ebx
+ movl %ebx, -148(%ebp) # 4-byte Spill
+ adcl 24(%esi), %eax
+ movl %eax, -132(%ebp) # 4-byte Spill
+ adcl 8(%esi), %edi
+ movl %edi, -140(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ movl 16(%ebp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ addl 20(%esi), %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+ adcl 24(%esi), %ecx
+ movl %ecx, -120(%ebp) # 4-byte Spill
+ movl 28(%esi), %eax
+ adcl 8(%esi), %eax
+ movl %eax, -160(%ebp) # 4-byte Spill
+ movl 32(%esi), %eax
+ adcl 12(%esi), %eax
+ movl 36(%esi), %ecx
+ adcl 16(%esi), %ecx
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %esi
+ popl %eax
+ movl %esi, -156(%ebp) # 4-byte Spill
+ movl %ebx, -124(%ebp) # 4-byte Spill
+ jb .LBB144_2
+# BB#1:
+ xorl %edi, %edi
+ movl $0, -124(%ebp) # 4-byte Folded Spill
+.LBB144_2:
+ movl %edi, -136(%ebp) # 4-byte Spill
+ movl 12(%ebp), %esi
+ movl %esi, %ebx
+ movl 36(%ebx), %esi
+ movl 32(%ebx), %edi
+ movl -96(%ebp), %edx # 4-byte Reload
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 12(%ebx), %edi
+ movl %edi, -116(%ebp) # 4-byte Spill
+ adcl 16(%ebx), %esi
+ movl %esi, -144(%ebp) # 4-byte Spill
+ movl %ecx, -112(%ebp) # 4-byte Spill
+ movl %eax, -104(%ebp) # 4-byte Spill
+ movl -160(%ebp), %edx # 4-byte Reload
+ movl %edx, -108(%ebp) # 4-byte Spill
+ movl -120(%ebp), %esi # 4-byte Reload
+ movl %esi, -96(%ebp) # 4-byte Spill
+ movl -152(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -100(%ebp) # 4-byte Spill
+ jb .LBB144_4
+# BB#3:
+ movl $0, -112(%ebp) # 4-byte Folded Spill
+ movl $0, -104(%ebp) # 4-byte Folded Spill
+ movl $0, -108(%ebp) # 4-byte Folded Spill
+ movl $0, -96(%ebp) # 4-byte Folded Spill
+ movl $0, -100(%ebp) # 4-byte Folded Spill
+.LBB144_4:
+ movl -148(%ebp), %esi # 4-byte Reload
+ movl %esi, -72(%ebp)
+ movl -132(%ebp), %edi # 4-byte Reload
+ movl %edi, -68(%ebp)
+ movl -140(%ebp), %esi # 4-byte Reload
+ movl %esi, -64(%ebp)
+ movl %ebx, -92(%ebp)
+ movl -120(%ebp), %esi # 4-byte Reload
+ movl %esi, -88(%ebp)
+ movl %edx, -84(%ebp)
+ movl %eax, -80(%ebp)
+ movl %ecx, -76(%ebp)
+ sbbl %edx, %edx
+ movl -116(%ebp), %eax # 4-byte Reload
+ movl %eax, -60(%ebp)
+ movl -144(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -56(%ebp)
+ movl -156(%ebp), %ecx # 4-byte Reload
+ pushl %eax
+ movl %ecx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB144_6
+# BB#5:
+ movl $0, %ebx
+ movl $0, %eax
+ movl $0, %edi
+.LBB144_6:
+ movl %eax, -116(%ebp) # 4-byte Spill
+ sbbl %eax, %eax
+ leal -92(%ebp), %ecx
+ movl %ecx, 8(%esp)
+ leal -72(%ebp), %ecx
+ movl %ecx, 4(%esp)
+ leal -52(%ebp), %ecx
+ movl %ecx, (%esp)
+ andl %eax, %edx
+ movl -124(%ebp), %eax # 4-byte Reload
+ addl %eax, -100(%ebp) # 4-byte Folded Spill
+ adcl %edi, -96(%ebp) # 4-byte Folded Spill
+ movl -108(%ebp), %esi # 4-byte Reload
+ adcl -136(%ebp), %esi # 4-byte Folded Reload
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl %eax, -104(%ebp) # 4-byte Folded Spill
+ movl -112(%ebp), %edi # 4-byte Reload
+ adcl %ebx, %edi
+ sbbl %eax, %eax
+ andl $1, %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ andl $1, %edx
+ movl %edx, -116(%ebp) # 4-byte Spill
+ movl -128(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre5L@PLT
+ movl -100(%ebp), %eax # 4-byte Reload
+ addl -32(%ebp), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ movl -96(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ adcl -24(%ebp), %esi
+ movl %esi, -108(%ebp) # 4-byte Spill
+ movl -104(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ adcl -16(%ebp), %edi
+ movl %edi, -112(%ebp) # 4-byte Spill
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl %eax, -116(%ebp) # 4-byte Folded Spill
+ movl -52(%ebp), %ecx
+ movl 8(%ebp), %esi
+ subl (%esi), %ecx
+ movl -48(%ebp), %ebx
+ sbbl 4(%esi), %ebx
+ movl -44(%ebp), %eax
+ sbbl 8(%esi), %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ movl -40(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl -36(%ebp), %edi
+ sbbl 16(%esi), %edi
+ movl 20(%esi), %eax
+ movl %eax, -124(%ebp) # 4-byte Spill
+ sbbl %eax, -100(%ebp) # 4-byte Folded Spill
+ movl 24(%esi), %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+ sbbl %eax, -96(%ebp) # 4-byte Folded Spill
+ movl 28(%esi), %eax
+ movl %eax, -132(%ebp) # 4-byte Spill
+ sbbl %eax, -108(%ebp) # 4-byte Folded Spill
+ movl 32(%esi), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ sbbl %eax, -104(%ebp) # 4-byte Folded Spill
+ movl 36(%esi), %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ sbbl %eax, -112(%ebp) # 4-byte Folded Spill
+ sbbl $0, -116(%ebp) # 4-byte Folded Spill
+ movl 40(%esi), %eax
+ movl %eax, -160(%ebp) # 4-byte Spill
+ subl %eax, %ecx
+ movl 44(%esi), %eax
+ movl %eax, -164(%ebp) # 4-byte Spill
+ sbbl %eax, %ebx
+ movl 48(%esi), %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ sbbl %eax, -120(%ebp) # 4-byte Folded Spill
+ movl 52(%esi), %eax
+ movl %eax, -172(%ebp) # 4-byte Spill
+ sbbl %eax, %edx
+ movl 56(%esi), %eax
+ movl %eax, -176(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl 60(%esi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ sbbl %eax, -100(%ebp) # 4-byte Folded Spill
+ movl 64(%esi), %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ sbbl %eax, -96(%ebp) # 4-byte Folded Spill
+ movl 68(%esi), %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ sbbl %eax, -108(%ebp) # 4-byte Folded Spill
+ movl 72(%esi), %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+ sbbl %eax, -104(%ebp) # 4-byte Folded Spill
+ movl 76(%esi), %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ sbbl %eax, -112(%ebp) # 4-byte Folded Spill
+ sbbl $0, -116(%ebp) # 4-byte Folded Spill
+ addl -124(%ebp), %ecx # 4-byte Folded Reload
+ adcl -128(%ebp), %ebx # 4-byte Folded Reload
+ movl %ecx, 20(%esi)
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl -132(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 24(%esi)
+ adcl -136(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 28(%esi)
+ adcl -140(%ebp), %edi # 4-byte Folded Reload
+ movl %edx, 32(%esi)
+ movl -100(%ebp), %eax # 4-byte Reload
+ adcl -160(%ebp), %eax # 4-byte Folded Reload
+ movl %edi, 36(%esi)
+ movl -96(%ebp), %ecx # 4-byte Reload
+ adcl -164(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 40(%esi)
+ movl -108(%ebp), %eax # 4-byte Reload
+ adcl -168(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 44(%esi)
+ movl -104(%ebp), %ecx # 4-byte Reload
+ adcl -172(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 48(%esi)
+ movl -112(%ebp), %edx # 4-byte Reload
+ adcl -176(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl -180(%ebp), %eax # 4-byte Folded Reload
+ movl %edx, 56(%esi)
+ movl %eax, 60(%esi)
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 64(%esi)
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 68(%esi)
+ movl -152(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 72(%esi)
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 76(%esi)
+ addl $188, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end144:
+ .size mcl_fpDbl_mulPre10L, .Lfunc_end144-mcl_fpDbl_mulPre10L
+
+ .globl mcl_fpDbl_sqrPre10L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre10L,@function
+mcl_fpDbl_sqrPre10L: # @mcl_fpDbl_sqrPre10L
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $188, %esp
+ calll .L145$pb
+.L145$pb:
+ popl %ebx
+.Ltmp16:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp16-.L145$pb), %ebx
+ movl %ebx, -120(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %esi
+ movl %esi, (%esp)
+ calll mcl_fpDbl_mulPre5L@PLT
+ leal 20(%edi), %eax
+ movl %eax, 8(%esp)
+ movl %eax, 4(%esp)
+ leal 40(%esi), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre5L@PLT
+ movl 36(%edi), %eax
+ movl 32(%edi), %ebx
+ movl 28(%edi), %esi
+ movl (%edi), %ecx
+ movl 4(%edi), %edx
+ addl 20(%edi), %ecx
+ adcl 24(%edi), %edx
+ adcl 8(%edi), %esi
+ adcl 12(%edi), %ebx
+ movl %ebx, -124(%ebp) # 4-byte Spill
+ adcl 16(%edi), %eax
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -128(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -108(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -104(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -100(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -96(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ sbbl %ebx, %ebx
+ movl %ebx, -116(%ebp) # 4-byte Spill
+ pushl %eax
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_1
+# BB#2:
+ movl $0, -112(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_3
+.LBB145_1:
+ leal (%ecx,%ecx), %edi
+ movl %edi, -112(%ebp) # 4-byte Spill
+.LBB145_3:
+ movl -96(%ebp), %edi # 4-byte Reload
+ pushl %eax
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ movl -124(%ebp), %edi # 4-byte Reload
+ jb .LBB145_4
+# BB#5:
+ movl $0, -96(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_6
+.LBB145_4:
+ movl %edx, %ebx
+ shldl $1, %ecx, %ebx
+ movl %ebx, -96(%ebp) # 4-byte Spill
+.LBB145_6:
+ movl -100(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_7
+# BB#8:
+ movl $0, -100(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_9
+.LBB145_7:
+ movl %esi, %ebx
+ shldl $1, %edx, %ebx
+ movl %ebx, -100(%ebp) # 4-byte Spill
+.LBB145_9:
+ movl -104(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_10
+# BB#11:
+ movl $0, -104(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_12
+.LBB145_10:
+ movl %edi, %ebx
+ shldl $1, %esi, %ebx
+ movl %ebx, -104(%ebp) # 4-byte Spill
+.LBB145_12:
+ movl -108(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_13
+# BB#14:
+ movl $0, -108(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_15
+.LBB145_13:
+ movl %eax, %ebx
+ shldl $1, %edi, %ebx
+ movl %ebx, -108(%ebp) # 4-byte Spill
+.LBB145_15:
+ movl %ecx, -72(%ebp)
+ movl %edx, -68(%ebp)
+ movl %esi, -64(%ebp)
+ movl %edi, -60(%ebp)
+ movl %eax, -56(%ebp)
+ movl %ecx, -92(%ebp)
+ movl %edx, -88(%ebp)
+ movl %esi, -84(%ebp)
+ movl %edi, -80(%ebp)
+ movl %eax, -76(%ebp)
+ movl -128(%ebp), %ecx # 4-byte Reload
+ pushl %eax
+ movl %ecx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB145_16
+# BB#17:
+ movl $0, -124(%ebp) # 4-byte Folded Spill
+ jmp .LBB145_18
+.LBB145_16:
+ shrl $31, %eax
+ movl %eax, -124(%ebp) # 4-byte Spill
+.LBB145_18:
+ leal -52(%ebp), %eax
+ movl %eax, (%esp)
+ leal -72(%ebp), %eax
+ movl %eax, 4(%esp)
+ leal -92(%ebp), %eax
+ movl %eax, 8(%esp)
+ movl -116(%ebp), %esi # 4-byte Reload
+ andl $1, %esi
+ movl -120(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre5L@PLT
+ movl -112(%ebp), %edi # 4-byte Reload
+ addl -32(%ebp), %edi
+ movl -96(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -96(%ebp) # 4-byte Spill
+ movl -100(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -100(%ebp) # 4-byte Spill
+ movl -104(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -104(%ebp) # 4-byte Spill
+ movl -108(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -108(%ebp) # 4-byte Spill
+ adcl -124(%ebp), %esi # 4-byte Folded Reload
+ movl -52(%ebp), %edx
+ movl 8(%ebp), %eax
+ subl (%eax), %edx
+ movl -48(%ebp), %ebx
+ sbbl 4(%eax), %ebx
+ movl -44(%ebp), %ecx
+ sbbl 8(%eax), %ecx
+ movl %ecx, -116(%ebp) # 4-byte Spill
+ movl -40(%ebp), %ecx
+ sbbl 12(%eax), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ movl -36(%ebp), %ecx
+ sbbl 16(%eax), %ecx
+ movl %ecx, -120(%ebp) # 4-byte Spill
+ movl 20(%eax), %ecx
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl %edi, -112(%ebp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+ sbbl %ecx, -96(%ebp) # 4-byte Folded Spill
+ movl 28(%eax), %ecx
+ movl %ecx, -132(%ebp) # 4-byte Spill
+ sbbl %ecx, -100(%ebp) # 4-byte Folded Spill
+ movl 32(%eax), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ sbbl %ecx, -104(%ebp) # 4-byte Folded Spill
+ movl 36(%eax), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ sbbl %ecx, -108(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ movl 40(%eax), %ecx
+ movl %ecx, -160(%ebp) # 4-byte Spill
+ subl %ecx, %edx
+ movl 44(%eax), %ecx
+ movl %ecx, -164(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 48(%eax), %ecx
+ movl %ecx, -168(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 52(%eax), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ movl -144(%ebp), %edi # 4-byte Reload
+ sbbl %ecx, %edi
+ movl 56(%eax), %ecx
+ movl %ecx, -176(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 60(%eax), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 64(%eax), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ sbbl %ecx, -96(%ebp) # 4-byte Folded Spill
+ movl 68(%eax), %ecx
+ movl %ecx, -148(%ebp) # 4-byte Spill
+ sbbl %ecx, -100(%ebp) # 4-byte Folded Spill
+ movl 72(%eax), %ecx
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ sbbl %ecx, -104(%ebp) # 4-byte Folded Spill
+ movl 76(%eax), %ecx
+ movl %ecx, -156(%ebp) # 4-byte Spill
+ sbbl %ecx, -108(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ addl -124(%ebp), %edx # 4-byte Folded Reload
+ adcl -128(%ebp), %ebx # 4-byte Folded Reload
+ movl %edx, 20(%eax)
+ movl -116(%ebp), %ecx # 4-byte Reload
+ adcl -132(%ebp), %ecx # 4-byte Folded Reload
+ movl %ebx, 24(%eax)
+ adcl -136(%ebp), %edi # 4-byte Folded Reload
+ movl %ecx, 28(%eax)
+ movl -120(%ebp), %edx # 4-byte Reload
+ adcl -140(%ebp), %edx # 4-byte Folded Reload
+ movl %edi, 32(%eax)
+ movl -112(%ebp), %ecx # 4-byte Reload
+ adcl -160(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 36(%eax)
+ movl -96(%ebp), %edx # 4-byte Reload
+ adcl -164(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 40(%eax)
+ movl -100(%ebp), %ecx # 4-byte Reload
+ adcl -168(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 44(%eax)
+ movl -104(%ebp), %edx # 4-byte Reload
+ adcl -172(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 48(%eax)
+ movl -108(%ebp), %ecx # 4-byte Reload
+ adcl -176(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 52(%eax)
+ adcl -180(%ebp), %esi # 4-byte Folded Reload
+ movl %ecx, 56(%eax)
+ movl %esi, 60(%eax)
+ movl -144(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 64(%eax)
+ movl -148(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 68(%eax)
+ movl -152(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 72(%eax)
+ movl -156(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 76(%eax)
+ addl $188, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end145:
+ .size mcl_fpDbl_sqrPre10L, .Lfunc_end145-mcl_fpDbl_sqrPre10L
+
+ .globl mcl_fp_mont10L
+ .align 16, 0x90
+ .type mcl_fp_mont10L,@function
+mcl_fp_mont10L: # @mcl_fp_mont10L
+# ----------------------------------------------------------------------
+# Montgomery multiplication, 10 x 32-bit limbs (320 bits), i386 PIC.
+# Compiler-generated (LLVM) vendored code: do not hand-edit the
+# instruction stream; regenerate from the mcl sources instead.
+#
+# cdecl stack arguments. After the four register pushes and the
+# "subl $1036, %esp" below they are addressed as:
+#   1056(%esp) = z  result, 10 limbs (written at the end)
+#   1060(%esp) = x  one 10-limb operand (base pointer handed to
+#                   the .LmulPv320x32 helper in %edx)
+#   1064(%esp) = y  other operand, scanned one limb at a time
+#   1068(%esp) = p  modulus; the word at p[-4] is read as the
+#                   Montgomery constant p' = -p^{-1} mod 2^32
+#
+# Structure (CIOS, fully unrolled for 10 rounds): for each limb y[i],
+#   t += x * y[i]                       (call .LmulPv320x32 with x)
+#   m  = t[0] * p'  mod 2^32            (imull with the spilled p')
+#   t  = (t + m * p) >> 32              (call .LmulPv320x32 with p)
+# followed by one final conditional subtraction t -= p if t >= p.
+# ----------------------------------------------------------------------
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1036, %esp # imm = 0x40C
+# PIC setup: load the GOT address into %ebx (call/pop idiom).
+ calll .L146$pb
+.L146$pb:
+ popl %ebx
+.Ltmp17:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp17-.L146$pb), %ebx
+ movl 1068(%esp), %eax
+# p' = -p^{-1} mod 2^32, stored one word before the modulus limbs.
+ movl -4(%eax), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 1060(%esp), %edx
+# Round 0: t = x * y[0].
+ calll .LmulPv320x32
+ movl 992(%esp), %edi
+ movl 996(%esp), %ebp
+ movl %edi, %eax
+# m = t[0] * p' (Montgomery quotient digit for this round).
+ imull %esi, %eax
+ movl 1032(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1028(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1024(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1020(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1016(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1012(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1008(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1004(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1000(%esp), %esi
+ movl %eax, (%esp)
+ leal 944(%esp), %ecx
+ movl 1068(%esp), %edx
+# t += m * p; the addl of t[0] below produces 0, so the adcl chain
+# effectively shifts t right by one limb.
+ calll .LmulPv320x32
+ addl 944(%esp), %edi
+ adcl 948(%esp), %ebp
+ adcl 952(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+# Round 1: t += x * y[1]; same shape repeats for y[2]..y[9] below.
+ movl 1064(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 896(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %edi
+ addl 896(%esp), %ebp
+ adcl 900(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 936(%esp), %edi
+ sbbl %eax, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 848(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ movl 64(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 848(%esp), %ebp
+ adcl 852(%esp), %esi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 856(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 860(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 864(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 868(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 872(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 876(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 880(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 884(%esp), %ebp
+ adcl 888(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 800(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ addl 800(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 832(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 836(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %esi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 1068(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 752(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 768(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 780(%esp), %esi
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 784(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 704(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 716(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 728(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 732(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 736(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 744(%esp), %edi
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 656(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %esi
+ movl %esi, %ecx
+ movl 44(%esp), %eax # 4-byte Reload
+ addl 656(%esp), %eax
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 676(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 688(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 696(%esp), %edi
+ adcl $0, %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 608(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 608(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 624(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 636(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 640(%esp), %esi
+ adcl 644(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 648(%esp), %edi
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 560(%esp), %eax
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 572(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 592(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 600(%esp), %edi
+ adcl $0, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 512(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 512(%esp), %ecx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ adcl 520(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 548(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 464(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 464(%esp), %edi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 468(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 472(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 484(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 488(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 492(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 496(%esp), %ebp
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 504(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 1060(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 416(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 432(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 444(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 452(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 368(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 380(%esp), %esi
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 384(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 400(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 320(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 320(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 328(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 332(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 348(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 360(%esp), %ebp
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 272(%esp), %esi
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 276(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 288(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 312(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl $0, %ebp
+ movl 1064(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl %edi, %ecx
+ addl 224(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 236(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 240(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 264(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 176(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %ebp
+ addl 176(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl 192(%esp), %esi
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 196(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %ebp
+# Round 9 (last multiplier limb y[9]).
+ movl 1064(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 1060(%esp), %edx
+ calll .LmulPv320x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 128(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 140(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ adcl 144(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 168(%esp), %ebp
+ sbbl %esi, %esi
+ movl 32(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 80(%esp), %ecx
+ movl 1068(%esp), %edx
+ calll .LmulPv320x32
+ andl $1, %esi
+ addl 80(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl 84(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 92(%esp), %ebx
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 112(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 120(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ adcl $0, %esi
+# Final reduction: compute t - p limb by limb; the running borrow ends
+# up in %esi (after the sbbl/andl below). If t < p the subtraction
+# borrows and we keep t, otherwise we keep t - p.
+ movl 1068(%esp), %edx
+ subl (%edx), %eax
+ sbbl 4(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl %ebx, %ecx
+ sbbl 8(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 12(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ sbbl 16(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ sbbl 20(%edx), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ sbbl 24(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ sbbl 28(%edx), %ecx
+ movl 68(%esp), %edi # 4-byte Reload
+ sbbl 32(%edx), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ sbbl 36(%edx), %ebp
+ movl %ebp, %edx
+ sbbl $0, %esi
+ andl $1, %esi
+# Limb-by-limb select: borrow != 0 means t < p, keep t; borrow == 0
+# means use t - p. Note: branch-based select, not constant-time.
+ jne .LBB146_2
+# BB#1:
+ movl %ecx, 48(%esp) # 4-byte Spill
+.LBB146_2:
+ movl %esi, %ecx
+ testb %cl, %cl
+ movl 76(%esp), %esi # 4-byte Reload
+ jne .LBB146_4
+# BB#3:
+ movl %eax, %esi
+.LBB146_4:
+# Store the selected 10-limb result into z.
+ movl 1056(%esp), %eax
+ movl %esi, (%eax)
+ movl 60(%esp), %edi # 4-byte Reload
+ jne .LBB146_6
+# BB#5:
+ movl 16(%esp), %edi # 4-byte Reload
+.LBB146_6:
+ movl %edi, 4(%eax)
+ jne .LBB146_8
+# BB#7:
+ movl 20(%esp), %ebx # 4-byte Reload
+.LBB146_8:
+ movl %ebx, 8(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB146_10
+# BB#9:
+ movl 24(%esp), %ebp # 4-byte Reload
+.LBB146_10:
+ movl %ebp, 12(%eax)
+ jne .LBB146_12
+# BB#11:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB146_12:
+ movl %ecx, 16(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ jne .LBB146_14
+# BB#13:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB146_14:
+ movl %ecx, 20(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ jne .LBB146_16
+# BB#15:
+ movl 56(%esp), %ecx # 4-byte Reload
+.LBB146_16:
+ movl %ecx, 24(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ jne .LBB146_18
+# BB#17:
+ movl 64(%esp), %ecx # 4-byte Reload
+.LBB146_18:
+ movl %ecx, 32(%eax)
+ movl 72(%esp), %ecx # 4-byte Reload
+ jne .LBB146_20
+# BB#19:
+ movl %edx, %ecx
+.LBB146_20:
+ movl %ecx, 36(%eax)
+ addl $1036, %esp # imm = 0x40C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end146:
+ .size mcl_fp_mont10L, .Lfunc_end146-mcl_fp_mont10L
+
+ .globl mcl_fp_montNF10L
+ .align 16, 0x90
+ .type mcl_fp_montNF10L,@function
+mcl_fp_montNF10L: # @mcl_fp_montNF10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1020, %esp # imm = 0x3FC
+ calll .L147$pb
+.L147$pb:
+ popl %ebx
+.Ltmp18:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp18-.L147$pb), %ebx
+ movl 1052(%esp), %eax
+ movl -4(%eax), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 976(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 976(%esp), %edi
+ movl 980(%esp), %esi
+ movl %edi, %eax
+ imull %ebp, %eax
+ movl 1016(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1012(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1008(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1004(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1000(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 996(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 992(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 988(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 984(%esp), %ebp
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 928(%esp), %edi
+ adcl 932(%esp), %esi
+ adcl 936(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 952(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 880(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 920(%esp), %ecx
+ addl 880(%esp), %esi
+ adcl 884(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 900(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl %esi, %eax
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 832(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 832(%esp), %esi
+ adcl 836(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl 848(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 872(%esp), %esi
+ movl 1048(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 824(%esp), %ecx
+ addl 784(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 796(%esp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 808(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 820(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 736(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 736(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 760(%esp), %edi
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 764(%esp), %ebp
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 768(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 688(%esp), %ecx
+ movl 1044(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ movl 728(%esp), %eax
+ movl 28(%esp), %edx # 4-byte Reload
+ addl 688(%esp), %edx
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 696(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 704(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 708(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 712(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 716(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 720(%esp), %ebp
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 724(%esp), %esi
+ adcl $0, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 640(%esp), %ecx
+ movl 1052(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ addl 640(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 656(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 672(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 676(%esp), %esi
+ movl %esi, %ebp
+ movl 28(%esp), %esi # 4-byte Reload
+ adcl 680(%esp), %esi
+ movl 1048(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 632(%esp), %edx
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 592(%esp), %ecx
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 604(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 624(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl 628(%esp), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 544(%esp), %esi
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl 548(%esp), %edi
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 552(%esp), %esi
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 556(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 536(%esp), %edx
+ addl 496(%esp), %edi
+ adcl 500(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ adcl 504(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %esi # 4-byte Reload
+ adcl 528(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 448(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 448(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 456(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 464(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 480(%esp), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %esi # 4-byte Reload
+ adcl 488(%esp), %esi
+ movl 1048(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 400(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 440(%esp), %eax
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 400(%esp), %ecx
+ adcl 404(%esp), %ebp
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 408(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 412(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 416(%esp), %edi
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 420(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 424(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 428(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 432(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl 436(%esp), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 352(%esp), %esi
+ adcl 356(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 360(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl 368(%esp), %esi
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 372(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 1044(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv320x32
+ movl 344(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 304(%esp), %ecx
+ adcl 308(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 316(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 320(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 324(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 256(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 256(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 272(%esp), %edi
+ adcl 276(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 284(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %esi # 4-byte Reload
+ adcl 288(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 248(%esp), %edx
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 208(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 220(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 224(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 236(%esp), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 160(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 160(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ adcl 176(%esp), %edi
+ movl 28(%esp), %esi # 4-byte Reload
+ adcl 180(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 192(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 1044(%esp), %edx
+ calll .LmulPv320x32
+ movl 152(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 112(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 120(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 124(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 128(%esp), %esi
+ movl %esi, %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 140(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 144(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %ebp
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 1052(%esp), %edx
+ calll .LmulPv320x32
+ addl 64(%esp), %ebp
+ movl %edi, %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ movl 32(%esp), %ebx # 4-byte Reload
+ adcl 68(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 72(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 76(%esp), %ebx
+ adcl 80(%esp), %ebp
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 96(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 1052(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %ecx
+ movl %ebx, %eax
+ sbbl 8(%edi), %eax
+ movl %ebp, %esi
+ sbbl 12(%edi), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 16(%edi), %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 24(%esp), %esi # 4-byte Reload
+ sbbl 20(%edi), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ sbbl 24(%edi), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 28(%edi), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 32(%edi), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 36(%edi), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl %esi, %edi
+ sarl $31, %edi
+ testl %edi, %edi
+ movl 60(%esp), %edi # 4-byte Reload
+ js .LBB147_2
+# BB#1:
+ movl %edx, %edi
+.LBB147_2:
+ movl 1040(%esp), %edx
+ movl %edi, (%edx)
+ movl 52(%esp), %edi # 4-byte Reload
+ js .LBB147_4
+# BB#3:
+ movl %ecx, %edi
+.LBB147_4:
+ movl %edi, 4(%edx)
+ js .LBB147_6
+# BB#5:
+ movl %eax, %ebx
+.LBB147_6:
+ movl %ebx, 8(%edx)
+ js .LBB147_8
+# BB#7:
+ movl 4(%esp), %ebp # 4-byte Reload
+.LBB147_8:
+ movl %ebp, 12(%edx)
+ movl 44(%esp), %esi # 4-byte Reload
+ movl 24(%esp), %eax # 4-byte Reload
+ js .LBB147_10
+# BB#9:
+ movl 8(%esp), %esi # 4-byte Reload
+.LBB147_10:
+ movl %esi, 16(%edx)
+ js .LBB147_12
+# BB#11:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB147_12:
+ movl %eax, 20(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB147_14
+# BB#13:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB147_14:
+ movl %eax, 24(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB147_16
+# BB#15:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB147_16:
+ movl %eax, 28(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB147_18
+# BB#17:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB147_18:
+ movl %eax, 32(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB147_20
+# BB#19:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB147_20:
+ movl %eax, 36(%edx)
+ addl $1020, %esp # imm = 0x3FC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end147:
+ .size mcl_fp_montNF10L, .Lfunc_end147-mcl_fp_montNF10L
+
+ .globl mcl_fp_montRed10L
+ .align 16, 0x90
+ .type mcl_fp_montRed10L,@function
+mcl_fp_montRed10L: # @mcl_fp_montRed10L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $604, %esp # imm = 0x25C
+ calll .L148$pb
+.L148$pb:
+ popl %eax
+.Ltmp19:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp19-.L148$pb), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 632(%esp), %edx
+ movl -4(%edx), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 628(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ imull %esi, %ebx
+ movl 76(%ecx), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 72(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 60(%ecx), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 56(%ecx), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 48(%ecx), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 44(%ecx), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 40(%ecx), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 36(%ecx), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 28(%ecx), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 24(%ecx), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 20(%ecx), %ebp
+ movl 16(%ecx), %edi
+ movl 12(%ecx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 8(%ecx), %esi
+ movl (%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 560(%esp), %ecx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 560(%esp), %eax
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 564(%esp), %ecx
+ adcl 568(%esp), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 576(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 580(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ sbbl %eax, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 512(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ movl 68(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 512(%esp), %esi
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 516(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 520(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 524(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 528(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 532(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 536(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 540(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 544(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 548(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 552(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 464(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 464(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 468(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 492(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 416(%esp), %eax
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 420(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 440(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ movl 60(%esp), %edi # 4-byte Reload
+ imull %edi, %eax
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 632(%esp), %eax
+ movl %eax, %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 368(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 372(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 404(%esp), %ebp
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull %edi, %eax
+ movl %eax, (%esp)
+ leal 320(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 320(%esp), %esi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 324(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %edi # 4-byte Reload
+ adcl 344(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 352(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 360(%esp), %esi
+ adcl $0, 88(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 272(%esp), %ebp
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 276(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl 288(%esp), %ebp
+ adcl 292(%esp), %edi
+ movl %edi, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 296(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 308(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 312(%esp), %esi
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, 68(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ movl 96(%esp), %eax # 4-byte Reload
+ addl 224(%esp), %eax
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 232(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 236(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 240(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ adcl 244(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 248(%esp), %ebp
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 252(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 256(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 260(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 264(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl %eax, %edi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 176(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 176(%esp), %edi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 180(%esp), %ecx
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl 184(%esp), %edi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 196(%esp), %ebp
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 632(%esp), %edx
+ movl 64(%esp), %ebx # 4-byte Reload
+ calll .LmulPv320x32
+ addl 128(%esp), %esi
+ movl %edi, %eax
+ adcl 132(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 136(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 140(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ adcl 144(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl %ebp, %edx
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 76(%esp), %ebx # 4-byte Reload
+ adcl 164(%esp), %ebx
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ subl 12(%esp), %edi # 4-byte Folded Reload
+ sbbl 8(%esp), %ecx # 4-byte Folded Reload
+ sbbl 16(%esp), %esi # 4-byte Folded Reload
+ sbbl 20(%esp), %edx # 4-byte Folded Reload
+ movl 120(%esp), %eax # 4-byte Reload
+ sbbl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ sbbl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ sbbl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ebx # 4-byte Reload
+ sbbl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 96(%esp) # 4-byte Spill
+ sbbl $0, %eax
+ andl $1, %eax
+ jne .LBB148_2
+# BB#1:
+ movl %edx, 80(%esp) # 4-byte Spill
+.LBB148_2:
+ testb %al, %al
+ movl 112(%esp), %edx # 4-byte Reload
+ jne .LBB148_4
+# BB#3:
+ movl %edi, %edx
+.LBB148_4:
+ movl 624(%esp), %edi
+ movl %edx, (%edi)
+ movl 108(%esp), %edx # 4-byte Reload
+ jne .LBB148_6
+# BB#5:
+ movl %ecx, 124(%esp) # 4-byte Spill
+.LBB148_6:
+ movl 124(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%edi)
+ movl 116(%esp), %ecx # 4-byte Reload
+ jne .LBB148_8
+# BB#7:
+ movl %esi, %ecx
+.LBB148_8:
+ movl %ecx, 8(%edi)
+ movl 80(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%edi)
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl 120(%esp), %eax # 4-byte Reload
+ jne .LBB148_10
+# BB#9:
+ movl 64(%esp), %eax # 4-byte Reload
+.LBB148_10:
+ movl %eax, 16(%edi)
+ movl 84(%esp), %eax # 4-byte Reload
+ movl 104(%esp), %ebp # 4-byte Reload
+ jne .LBB148_12
+# BB#11:
+ movl 68(%esp), %ebp # 4-byte Reload
+.LBB148_12:
+ movl %ebp, 20(%edi)
+ movl 88(%esp), %ebx # 4-byte Reload
+ jne .LBB148_14
+# BB#13:
+ movl 72(%esp), %ebx # 4-byte Reload
+.LBB148_14:
+ movl %ebx, 24(%edi)
+ jne .LBB148_16
+# BB#15:
+ movl 92(%esp), %edx # 4-byte Reload
+.LBB148_16:
+ movl %edx, 28(%edi)
+ jne .LBB148_18
+# BB#17:
+ movl 100(%esp), %ecx # 4-byte Reload
+.LBB148_18:
+ movl %ecx, 32(%edi)
+ jne .LBB148_20
+# BB#19:
+ movl 96(%esp), %eax # 4-byte Reload
+.LBB148_20:
+ movl %eax, 36(%edi)
+ addl $604, %esp # imm = 0x25C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end148:
+ .size mcl_fp_montRed10L, .Lfunc_end148-mcl_fp_montRed10L
+
+# -----------------------------------------------------------------------------
+# mcl_fp_addPre10L(uint32_t z[10], const uint32_t x[10], const uint32_t y[10])
+# 320-bit (10 x 32-bit limb) addition WITHOUT modular reduction:
+#   z = x + y; returns the final carry (0 or 1) in %eax.
+# i386 cdecl; after the three register pushes the stack arguments sit at
+#   16(%esp) = z (dst), 20(%esp) = first operand, 24(%esp) = second operand.
+# The carry flag (CF) is live across the whole adcl chain below: only
+# flag-preserving movl instructions are interleaved between the adds.
+# NOTE(review): compiler-generated (LLVM) code; comments only, code untouched.
+# -----------------------------------------------------------------------------
+	.globl	mcl_fp_addPre10L
+	.align	16, 0x90
+	.type	mcl_fp_addPre10L,@function
+mcl_fp_addPre10L:                       # @mcl_fp_addPre10L
+# BB#0:
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	movl	24(%esp), %eax
+	movl	(%eax), %edx
+	movl	4(%eax), %esi
+	movl	20(%esp), %ecx
+# limb 0 sets CF; limbs 1..9 ripple it with adcl
+	addl	(%ecx), %edx
+	adcl	4(%ecx), %esi
+	movl	8(%eax), %edi
+	adcl	8(%ecx), %edi
+	movl	16(%esp), %ebx
+	movl	%edx, (%ebx)
+	movl	12(%ecx), %edx
+	movl	%esi, 4(%ebx)
+	movl	16(%ecx), %esi
+	adcl	12(%eax), %edx
+	adcl	16(%eax), %esi
+	movl	%edi, 8(%ebx)
+	movl	20(%eax), %edi
+	movl	%edx, 12(%ebx)
+	movl	20(%ecx), %edx
+	adcl	%edi, %edx
+	movl	24(%eax), %edi
+	movl	%esi, 16(%ebx)
+	movl	24(%ecx), %esi
+	adcl	%edi, %esi
+	movl	28(%eax), %edi
+	movl	%edx, 20(%ebx)
+	movl	28(%ecx), %edx
+	adcl	%edi, %edx
+	movl	32(%eax), %edi
+	movl	%esi, 24(%ebx)
+	movl	32(%ecx), %esi
+	adcl	%edi, %esi
+	movl	%edx, 28(%ebx)
+	movl	%esi, 32(%ebx)
+	movl	36(%eax), %eax
+	movl	36(%ecx), %ecx
+	adcl	%eax, %ecx
+	movl	%ecx, 36(%ebx)
+# materialize the final carry as a 0/1 return value:
+# sbbl reg,reg yields 0 or 0xFFFFFFFF from CF, andl $1 normalizes it
+	sbbl	%eax, %eax
+	andl	$1, %eax
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	retl
+.Lfunc_end149:
+	.size	mcl_fp_addPre10L, .Lfunc_end149-mcl_fp_addPre10L
+
+# -----------------------------------------------------------------------------
+# mcl_fp_subPre10L(uint32_t z[10], const uint32_t x[10], const uint32_t y[10])
+# 320-bit (10 limb) subtraction WITHOUT modular reduction:
+#   z = x - y; returns the final borrow (0 or 1) in %eax.
+# i386 cdecl; after the four register pushes the stack arguments sit at
+#   20(%esp) = z (dst), 24(%esp) = minuend x, 28(%esp) = subtrahend y.
+# %eax is zeroed up front (flag-clobbering xorl happens BEFORE the subl that
+# starts the borrow chain); CF stays live through the whole sbbl chain.
+# NOTE(review): compiler-generated (LLVM) code; comments only, code untouched.
+# -----------------------------------------------------------------------------
+	.globl	mcl_fp_subPre10L
+	.align	16, 0x90
+	.type	mcl_fp_subPre10L,@function
+mcl_fp_subPre10L:                       # @mcl_fp_subPre10L
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	movl	24(%esp), %ecx
+	movl	(%ecx), %esi
+	movl	4(%ecx), %edi
+	xorl	%eax, %eax
+	movl	28(%esp), %edx
+# limb 0 sets CF (borrow); limbs 1..9 ripple it with sbbl
+	subl	(%edx), %esi
+	sbbl	4(%edx), %edi
+	movl	8(%ecx), %ebx
+	sbbl	8(%edx), %ebx
+	movl	20(%esp), %ebp
+	movl	%esi, (%ebp)
+	movl	12(%ecx), %esi
+	sbbl	12(%edx), %esi
+	movl	%edi, 4(%ebp)
+	movl	16(%ecx), %edi
+	sbbl	16(%edx), %edi
+	movl	%ebx, 8(%ebp)
+	movl	20(%edx), %ebx
+	movl	%esi, 12(%ebp)
+	movl	20(%ecx), %esi
+	sbbl	%ebx, %esi
+	movl	24(%edx), %ebx
+	movl	%edi, 16(%ebp)
+	movl	24(%ecx), %edi
+	sbbl	%ebx, %edi
+	movl	28(%edx), %ebx
+	movl	%esi, 20(%ebp)
+	movl	28(%ecx), %esi
+	sbbl	%ebx, %esi
+	movl	32(%edx), %ebx
+	movl	%edi, 24(%ebp)
+	movl	32(%ecx), %edi
+	sbbl	%ebx, %edi
+	movl	%esi, 28(%ebp)
+	movl	%edi, 32(%ebp)
+	movl	36(%edx), %edx
+	movl	36(%ecx), %ecx
+	sbbl	%edx, %ecx
+	movl	%ecx, 36(%ebp)
+# fold the final borrow into the pre-zeroed %eax and normalize to 0/1
+	sbbl	$0, %eax
+	andl	$1, %eax
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end150:
+	.size	mcl_fp_subPre10L, .Lfunc_end150-mcl_fp_subPre10L
+
+# -----------------------------------------------------------------------------
+# mcl_fp_shr1_10L(uint32_t z[10], const uint32_t x[10])
+# Logical right shift by one bit of a 320-bit (10 limb) value: z = x >> 1.
+# i386 cdecl; after the single push: 8(%esp) = z (dst), 12(%esp) = x (src).
+# Each shrdl $1, hi, lo shifts limb `lo` right by one while pulling in the
+# low bit of the next-higher limb `hi`; the top limb gets a plain shrl, so a
+# zero is shifted in at bit 319.
+# NOTE(review): compiler-generated (LLVM) code; comments only, code untouched.
+# -----------------------------------------------------------------------------
+	.globl	mcl_fp_shr1_10L
+	.align	16, 0x90
+	.type	mcl_fp_shr1_10L,@function
+mcl_fp_shr1_10L:                        # @mcl_fp_shr1_10L
+# BB#0:
+	pushl	%esi
+	movl	12(%esp), %eax
+	movl	(%eax), %ecx
+	movl	4(%eax), %edx
+	shrdl	$1, %edx, %ecx
+	movl	8(%esp), %esi
+	movl	%ecx, (%esi)
+	movl	8(%eax), %ecx
+	shrdl	$1, %ecx, %edx
+	movl	%edx, 4(%esi)
+	movl	12(%eax), %edx
+	shrdl	$1, %edx, %ecx
+	movl	%ecx, 8(%esi)
+	movl	16(%eax), %ecx
+	shrdl	$1, %ecx, %edx
+	movl	%edx, 12(%esi)
+	movl	20(%eax), %edx
+	shrdl	$1, %edx, %ecx
+	movl	%ecx, 16(%esi)
+	movl	24(%eax), %ecx
+	shrdl	$1, %ecx, %edx
+	movl	%edx, 20(%esi)
+	movl	28(%eax), %edx
+	shrdl	$1, %edx, %ecx
+	movl	%ecx, 24(%esi)
+	movl	32(%eax), %ecx
+	shrdl	$1, %ecx, %edx
+	movl	%edx, 28(%esi)
+	movl	36(%eax), %eax
+	shrdl	$1, %eax, %ecx
+	movl	%ecx, 32(%esi)
+# top limb: plain shift, zero enters the most-significant bit
+	shrl	%eax
+	movl	%eax, 36(%esi)
+	popl	%esi
+	retl
+.Lfunc_end151:
+	.size	mcl_fp_shr1_10L, .Lfunc_end151-mcl_fp_shr1_10L
+
+# -----------------------------------------------------------------------------
+# mcl_fp_add10L(uint32_t z[10], const uint32_t x[10], const uint32_t y[10],
+#               const uint32_t p[10])
+# Modular addition over a 320-bit (10 limb) field: z = (x + y) mod p.
+# i386 cdecl; after prologue (4 pushes + 24-byte frame) stack args sit at
+#   44(%esp) = z, 48(%esp) = x, 52(%esp) = y, 56(%esp) = p (modulus).
+# Strategy: (1) compute s = x + y and store it to z unconditionally, keeping
+# the carry; (2) compute s - p; (3) if that subtraction (including the
+# incoming add-carry) produced no borrow, overwrite z with the reduced value.
+# The "carry" branch therefore leaves the unreduced sum in z.
+# NOTE(review): compiler-generated (LLVM) code; comments only, code untouched.
+# -----------------------------------------------------------------------------
+	.globl	mcl_fp_add10L
+	.align	16, 0x90
+	.type	mcl_fp_add10L,@function
+mcl_fp_add10L:                          # @mcl_fp_add10L
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$24, %esp
+	movl	52(%esp), %edi
+	movl	(%edi), %ecx
+	movl	4(%edi), %eax
+	movl	48(%esp), %ebx
+# s = x + y: limbs 0..5 spilled to the local frame, limbs 6..9 kept in regs
+	addl	(%ebx), %ecx
+	movl	%ecx, (%esp)            # 4-byte Spill
+	adcl	4(%ebx), %eax
+	movl	%eax, 20(%esp)          # 4-byte Spill
+	movl	8(%edi), %eax
+	adcl	8(%ebx), %eax
+	movl	%eax, 16(%esp)          # 4-byte Spill
+	movl	12(%ebx), %ecx
+	movl	16(%ebx), %eax
+	adcl	12(%edi), %ecx
+	movl	%ecx, 12(%esp)          # 4-byte Spill
+	adcl	16(%edi), %eax
+	movl	%eax, 8(%esp)           # 4-byte Spill
+	movl	20(%ebx), %eax
+	adcl	20(%edi), %eax
+	movl	%eax, 4(%esp)           # 4-byte Spill
+	movl	24(%ebx), %esi
+	adcl	24(%edi), %esi
+	movl	28(%ebx), %ebp
+	adcl	28(%edi), %ebp
+	movl	32(%ebx), %edx
+	adcl	32(%edi), %edx
+	movl	36(%ebx), %ecx
+	adcl	36(%edi), %ecx
+# store the (possibly unreduced) sum to z
+	movl	44(%esp), %edi
+	movl	(%esp), %ebx            # 4-byte Reload
+	movl	%ebx, (%edi)
+	movl	20(%esp), %eax          # 4-byte Reload
+	movl	%eax, 4(%edi)
+	movl	16(%esp), %eax          # 4-byte Reload
+	movl	%eax, 8(%edi)
+	movl	12(%esp), %eax          # 4-byte Reload
+	movl	%eax, 12(%edi)
+	movl	8(%esp), %eax           # 4-byte Reload
+	movl	%eax, 16(%edi)
+	movl	4(%esp), %eax           # 4-byte Reload
+	movl	%eax, 20(%edi)
+	movl	%esi, 24(%edi)
+	movl	%ebp, 28(%edi)
+	movl	%edx, 32(%edi)
+	movl	%ecx, 36(%edi)
+# capture the add carry as 0/1 in %eax before the trial subtraction
+	sbbl	%eax, %eax
+	andl	$1, %eax
+# trial subtraction: s - p
+	movl	56(%esp), %edi
+	subl	(%edi), %ebx
+	movl	%ebx, (%esp)            # 4-byte Spill
+	movl	20(%esp), %ebx          # 4-byte Reload
+	sbbl	4(%edi), %ebx
+	movl	%ebx, 20(%esp)          # 4-byte Spill
+	movl	16(%esp), %ebx          # 4-byte Reload
+	sbbl	8(%edi), %ebx
+	movl	%ebx, 16(%esp)          # 4-byte Spill
+	movl	12(%esp), %ebx          # 4-byte Reload
+	sbbl	12(%edi), %ebx
+	movl	%ebx, 12(%esp)          # 4-byte Spill
+	movl	8(%esp), %ebx           # 4-byte Reload
+	sbbl	16(%edi), %ebx
+	movl	%ebx, 8(%esp)           # 4-byte Spill
+	movl	4(%esp), %ebx           # 4-byte Reload
+	sbbl	20(%edi), %ebx
+	movl	%ebx, 4(%esp)           # 4-byte Spill
+	sbbl	24(%edi), %esi
+	sbbl	28(%edi), %ebp
+	sbbl	32(%edi), %edx
+	sbbl	36(%edi), %ecx
+# combine the add carry with the subtract borrow; nonzero => s < p overall
+	sbbl	$0, %eax
+	testb	$1, %al
+	jne	.LBB152_2
+# BB#1:                                 # %nocarry
+# s >= p: overwrite z with the reduced value s - p
+	movl	(%esp), %edi            # 4-byte Reload
+	movl	44(%esp), %ebx
+	movl	%edi, (%ebx)
+	movl	20(%esp), %edi          # 4-byte Reload
+	movl	%edi, 4(%ebx)
+	movl	16(%esp), %edi          # 4-byte Reload
+	movl	%edi, 8(%ebx)
+	movl	12(%esp), %edi          # 4-byte Reload
+	movl	%edi, 12(%ebx)
+	movl	8(%esp), %edi           # 4-byte Reload
+	movl	%edi, 16(%ebx)
+	movl	4(%esp), %eax           # 4-byte Reload
+	movl	%eax, 20(%ebx)
+	movl	%esi, 24(%ebx)
+	movl	%ebp, 28(%ebx)
+	movl	%edx, 32(%ebx)
+	movl	%ecx, 36(%ebx)
+.LBB152_2:                              # %carry
+	addl	$24, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end152:
+	.size	mcl_fp_add10L, .Lfunc_end152-mcl_fp_add10L
+
+# -----------------------------------------------------------------------------
+# mcl_fp_addNF10L(uint32_t z[10], const uint32_t x[10], const uint32_t y[10],
+#                 const uint32_t p[10])
+# "NF" variant of modular addition: z = (x + y) mod p, selecting the result
+# by the SIGN of the trial subtraction instead of by the add carry
+# (presumably valid because inputs are fully reduced, so x+y < 2p and never
+# overflows 320 bits — TODO confirm against callers).
+# i386 cdecl; after prologue (4 pushes + 72-byte frame) stack args sit at
+#   92(%esp) = z, 96(%esp) = x, 100(%esp) = y, 104(%esp) = p.
+# Strategy: s = x + y; t = s - p; sign-extend the top limb of t (sarl $31);
+# each limb of z is then t[i] when t >= 0 (js not taken) else s[i].
+# NOTE(review): compiler-generated (LLVM) code; comments only, code untouched.
+# -----------------------------------------------------------------------------
+	.globl	mcl_fp_addNF10L
+	.align	16, 0x90
+	.type	mcl_fp_addNF10L,@function
+mcl_fp_addNF10L:                        # @mcl_fp_addNF10L
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$72, %esp
+	movl	100(%esp), %ecx
+	movl	(%ecx), %eax
+	movl	4(%ecx), %esi
+	movl	96(%esp), %edx
+# s = x + y (limbs spilled to 36..68(%esp))
+	addl	(%edx), %eax
+	movl	%eax, 52(%esp)          # 4-byte Spill
+	adcl	4(%edx), %esi
+	movl	%esi, 56(%esp)          # 4-byte Spill
+	movl	36(%ecx), %edi
+	movl	32(%ecx), %eax
+	movl	%eax, 64(%esp)          # 4-byte Spill
+	movl	28(%ecx), %eax
+	movl	%eax, 68(%esp)          # 4-byte Spill
+	movl	24(%ecx), %eax
+	movl	%eax, 60(%esp)          # 4-byte Spill
+	movl	20(%ecx), %ebp
+	movl	16(%ecx), %ebx
+	movl	12(%ecx), %eax
+	movl	8(%ecx), %esi
+	adcl	8(%edx), %esi
+	adcl	12(%edx), %eax
+	movl	%eax, 40(%esp)          # 4-byte Spill
+	adcl	16(%edx), %ebx
+	movl	%ebx, 44(%esp)          # 4-byte Spill
+	adcl	20(%edx), %ebp
+	movl	%ebp, 48(%esp)          # 4-byte Spill
+	movl	60(%esp), %ecx          # 4-byte Reload
+	adcl	24(%edx), %ecx
+	movl	%ecx, 60(%esp)          # 4-byte Spill
+	movl	68(%esp), %ecx          # 4-byte Reload
+	adcl	28(%edx), %ecx
+	movl	%ecx, 68(%esp)          # 4-byte Spill
+	movl	64(%esp), %ecx          # 4-byte Reload
+	adcl	32(%edx), %ecx
+	movl	%ecx, 64(%esp)          # 4-byte Spill
+	movl	%esi, %ecx
+	adcl	36(%edx), %edi
+	movl	%edi, 36(%esp)          # 4-byte Spill
+# trial subtraction: t = s - p (results spilled to 0..32(%esp))
+	movl	104(%esp), %edi
+	movl	52(%esp), %edx          # 4-byte Reload
+	subl	(%edi), %edx
+	movl	56(%esp), %esi          # 4-byte Reload
+	sbbl	4(%edi), %esi
+	movl	%esi, (%esp)            # 4-byte Spill
+	movl	%ecx, %esi
+	sbbl	8(%edi), %esi
+	movl	%esi, 4(%esp)           # 4-byte Spill
+	sbbl	12(%edi), %eax
+	movl	%eax, 8(%esp)           # 4-byte Spill
+	sbbl	16(%edi), %ebx
+	movl	%ebx, 12(%esp)          # 4-byte Spill
+	sbbl	20(%edi), %ebp
+	movl	%ebp, 16(%esp)          # 4-byte Spill
+	movl	60(%esp), %esi          # 4-byte Reload
+	movl	%esi, %eax
+	movl	%esi, %ebp
+	sbbl	24(%edi), %eax
+	movl	%eax, 20(%esp)          # 4-byte Spill
+	movl	68(%esp), %eax          # 4-byte Reload
+	sbbl	28(%edi), %eax
+	movl	%eax, 24(%esp)          # 4-byte Spill
+	movl	64(%esp), %esi          # 4-byte Reload
+	movl	%esi, %eax
+	movl	%esi, %ebx
+	sbbl	32(%edi), %eax
+	movl	%eax, 28(%esp)          # 4-byte Spill
+	movl	36(%esp), %eax          # 4-byte Reload
+	movl	%eax, %esi
+	sbbl	36(%edi), %esi
+	movl	%esi, 32(%esp)          # 4-byte Spill
+	movl	%esi, %edi
+	movl	52(%esp), %esi          # 4-byte Reload
+# sign of the top limb of t decides the selection for every limb below
+	sarl	$31, %edi
+	testl	%edi, %edi
+	js	.LBB153_2
+# BB#1:
+	movl	%edx, %esi
+.LBB153_2:
+	movl	92(%esp), %edx
+	movl	%esi, (%edx)
+	movl	56(%esp), %esi          # 4-byte Reload
+	js	.LBB153_4
+# BB#3:
+	movl	(%esp), %esi            # 4-byte Reload
+.LBB153_4:
+	movl	%esi, 4(%edx)
+	movl	%ebp, %edi
+	movl	40(%esp), %esi          # 4-byte Reload
+	js	.LBB153_6
+# BB#5:
+	movl	4(%esp), %ecx           # 4-byte Reload
+.LBB153_6:
+	movl	%ecx, 8(%edx)
+	movl	%ebx, %ecx
+	movl	44(%esp), %ebp          # 4-byte Reload
+	js	.LBB153_8
+# BB#7:
+	movl	8(%esp), %esi           # 4-byte Reload
+.LBB153_8:
+	movl	%esi, 12(%edx)
+	movl	68(%esp), %esi          # 4-byte Reload
+	movl	48(%esp), %ebx          # 4-byte Reload
+	js	.LBB153_10
+# BB#9:
+	movl	12(%esp), %ebp          # 4-byte Reload
+.LBB153_10:
+	movl	%ebp, 16(%edx)
+	js	.LBB153_12
+# BB#11:
+	movl	16(%esp), %ebx          # 4-byte Reload
+.LBB153_12:
+	movl	%ebx, 20(%edx)
+	js	.LBB153_14
+# BB#13:
+	movl	20(%esp), %edi          # 4-byte Reload
+.LBB153_14:
+	movl	%edi, 24(%edx)
+	js	.LBB153_16
+# BB#15:
+	movl	24(%esp), %esi          # 4-byte Reload
+.LBB153_16:
+	movl	%esi, 28(%edx)
+	js	.LBB153_18
+# BB#17:
+	movl	28(%esp), %ecx          # 4-byte Reload
+.LBB153_18:
+	movl	%ecx, 32(%edx)
+	js	.LBB153_20
+# BB#19:
+	movl	32(%esp), %eax          # 4-byte Reload
+.LBB153_20:
+	movl	%eax, 36(%edx)
+	addl	$72, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end153:
+	.size	mcl_fp_addNF10L, .Lfunc_end153-mcl_fp_addNF10L
+
+# -----------------------------------------------------------------------------
+# mcl_fp_sub10L(uint32_t z[10], const uint32_t x[10], const uint32_t y[10],
+#               const uint32_t p[10])
+# Modular subtraction over a 320-bit (10 limb) field: z = (x - y) mod p.
+# i386 cdecl; after prologue (4 pushes + 32-byte frame) stack args sit at
+#   52(%esp) = z, 56(%esp) = x, 60(%esp) = y, 64(%esp) = p (modulus).
+# Strategy: compute d = x - y and store it to z unconditionally; if the
+# subtraction borrowed (d went negative), add p back into z in place.
+# NOTE(review): compiler-generated (LLVM) code; comments only, code untouched.
+# -----------------------------------------------------------------------------
+	.globl	mcl_fp_sub10L
+	.align	16, 0x90
+	.type	mcl_fp_sub10L,@function
+mcl_fp_sub10L:                          # @mcl_fp_sub10L
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$32, %esp
+	movl	56(%esp), %esi
+	movl	(%esi), %ecx
+	movl	4(%esi), %eax
+# %ebx pre-zeroed so the final borrow can be folded in with sbbl $0
+	xorl	%ebx, %ebx
+	movl	60(%esp), %edi
+# d = x - y (limbs 0..7 spilled, limbs 8..9 kept in %ebp/%esi)
+	subl	(%edi), %ecx
+	movl	%ecx, 12(%esp)          # 4-byte Spill
+	sbbl	4(%edi), %eax
+	movl	%eax, 16(%esp)          # 4-byte Spill
+	movl	8(%esi), %eax
+	sbbl	8(%edi), %eax
+	movl	%eax, 24(%esp)          # 4-byte Spill
+	movl	12(%esi), %eax
+	sbbl	12(%edi), %eax
+	movl	%eax, 28(%esp)          # 4-byte Spill
+	movl	16(%esi), %eax
+	sbbl	16(%edi), %eax
+	movl	%eax, 20(%esp)          # 4-byte Spill
+	movl	20(%esi), %edx
+	sbbl	20(%edi), %edx
+	movl	%edx, 8(%esp)           # 4-byte Spill
+	movl	24(%esi), %ecx
+	sbbl	24(%edi), %ecx
+	movl	%ecx, 4(%esp)           # 4-byte Spill
+	movl	28(%esi), %eax
+	sbbl	28(%edi), %eax
+	movl	%eax, (%esp)            # 4-byte Spill
+	movl	32(%esi), %ebp
+	sbbl	32(%edi), %ebp
+	movl	36(%esi), %esi
+	sbbl	36(%edi), %esi
+	sbbl	$0, %ebx
+	testb	$1, %bl
+# store the raw difference to z regardless of borrow
+	movl	52(%esp), %ebx
+	movl	12(%esp), %edi          # 4-byte Reload
+	movl	%edi, (%ebx)
+	movl	16(%esp), %edi          # 4-byte Reload
+	movl	%edi, 4(%ebx)
+	movl	24(%esp), %edi          # 4-byte Reload
+	movl	%edi, 8(%ebx)
+	movl	28(%esp), %edi          # 4-byte Reload
+	movl	%edi, 12(%ebx)
+	movl	20(%esp), %edi          # 4-byte Reload
+	movl	%edi, 16(%ebx)
+	movl	%edx, 20(%ebx)
+	movl	%ecx, 24(%ebx)
+	movl	%eax, 28(%ebx)
+	movl	%ebp, 32(%ebx)
+	movl	%esi, 36(%ebx)
+	je	.LBB154_2
+# BB#1:                                 # %carry
+# borrow occurred: z = d + p, written limb by limb
+	movl	%esi, %edi
+	movl	64(%esp), %esi
+	movl	12(%esp), %ecx          # 4-byte Reload
+	addl	(%esi), %ecx
+	movl	%ecx, (%ebx)
+	movl	16(%esp), %edx          # 4-byte Reload
+	adcl	4(%esi), %edx
+	movl	%edx, 4(%ebx)
+	movl	24(%esp), %ecx          # 4-byte Reload
+	adcl	8(%esi), %ecx
+	movl	12(%esi), %eax
+	adcl	28(%esp), %eax          # 4-byte Folded Reload
+	movl	%ecx, 8(%ebx)
+	movl	16(%esi), %ecx
+	adcl	20(%esp), %ecx          # 4-byte Folded Reload
+	movl	%eax, 12(%ebx)
+	movl	20(%esi), %eax
+	adcl	8(%esp), %eax           # 4-byte Folded Reload
+	movl	%ecx, 16(%ebx)
+	movl	24(%esi), %ecx
+	adcl	4(%esp), %ecx           # 4-byte Folded Reload
+	movl	%eax, 20(%ebx)
+	movl	28(%esi), %eax
+	adcl	(%esp), %eax            # 4-byte Folded Reload
+	movl	%ecx, 24(%ebx)
+	movl	%eax, 28(%ebx)
+	movl	32(%esi), %eax
+	adcl	%ebp, %eax
+	movl	%eax, 32(%ebx)
+	movl	36(%esi), %eax
+	adcl	%edi, %eax
+	movl	%eax, 36(%ebx)
+.LBB154_2:                              # %nocarry
+	addl	$32, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end154:
+	.size	mcl_fp_sub10L, .Lfunc_end154-mcl_fp_sub10L
+
+# -----------------------------------------------------------------------------
+# mcl_fp_subNF10L(uint32_t z[10], const uint32_t x[10], const uint32_t y[10],
+#                 const uint32_t p[10])
+# Branchless ("NF") modular subtraction: z = (x - y) mod p.
+# i386 cdecl; after prologue (4 pushes + 56-byte frame) stack args sit at
+#   76(%esp) = z, 80(%esp) = x, 84(%esp) = y, 88(%esp) = p.
+# Strategy: d = x - y; build an all-ones/all-zeros mask from the sign of the
+# top limb of d (sarl $31), AND each limb of p with that mask, and add the
+# masked modulus to d — i.e. z = d + (d < 0 ? p : 0), with no branches.
+# The mask is reconstructed in several registers (%eax/%ecx/%ebx) via the
+# addl/adcl/shrl/orl dance because the and-chain consumes it piecewise.
+# NOTE(review): compiler-generated (LLVM) code; comments only, code untouched.
+# -----------------------------------------------------------------------------
+	.globl	mcl_fp_subNF10L
+	.align	16, 0x90
+	.type	mcl_fp_subNF10L,@function
+mcl_fp_subNF10L:                        # @mcl_fp_subNF10L
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$56, %esp
+	movl	80(%esp), %eax
+	movl	36(%eax), %esi
+	movl	(%eax), %edi
+	movl	4(%eax), %edx
+	movl	84(%esp), %ecx
+# d = x - y (limbs spilled; top limb kept in %esi for the sign test)
+	subl	(%ecx), %edi
+	movl	%edi, 36(%esp)          # 4-byte Spill
+	sbbl	4(%ecx), %edx
+	movl	%edx, 40(%esp)          # 4-byte Spill
+	movl	32(%eax), %edx
+	movl	%edx, 48(%esp)          # 4-byte Spill
+	movl	28(%eax), %edi
+	movl	24(%eax), %ebx
+	movl	20(%eax), %ebp
+	movl	16(%eax), %edx
+	movl	%edx, 52(%esp)          # 4-byte Spill
+	movl	12(%eax), %edx
+	movl	8(%eax), %eax
+	sbbl	8(%ecx), %eax
+	movl	%eax, 16(%esp)          # 4-byte Spill
+	sbbl	12(%ecx), %edx
+	movl	%edx, 24(%esp)          # 4-byte Spill
+	movl	52(%esp), %eax          # 4-byte Reload
+	sbbl	16(%ecx), %eax
+	movl	%eax, 52(%esp)          # 4-byte Spill
+	sbbl	20(%ecx), %ebp
+	movl	%ebp, 28(%esp)          # 4-byte Spill
+	sbbl	24(%ecx), %ebx
+	movl	%ebx, 32(%esp)          # 4-byte Spill
+	sbbl	28(%ecx), %edi
+	movl	%edi, 44(%esp)          # 4-byte Spill
+	movl	48(%esp), %eax          # 4-byte Reload
+	sbbl	32(%ecx), %eax
+	movl	%eax, 48(%esp)          # 4-byte Spill
+	sbbl	36(%ecx), %esi
+	movl	%esi, 20(%esp)          # 4-byte Spill
+# %eax = 0 or 0xFFFFFFFF depending on sign of d's top limb
+	movl	%esi, %eax
+	sarl	$31, %eax
+# replicate the mask into %ecx and %ebx (same value, flag games aside)
+	movl	%eax, %edx
+	addl	%edx, %edx
+	movl	%eax, %ecx
+	adcl	%ecx, %ecx
+	movl	%esi, %ebx
+	shrl	$31, %ebx
+	orl	%edx, %ebx
+	movl	88(%esp), %edi
+# mask each limb of p: (p & mask) is p when d < 0, else 0
+	movl	20(%edi), %edx
+	andl	%ecx, %edx
+	movl	%edx, 12(%esp)          # 4-byte Spill
+	movl	12(%edi), %edx
+	andl	%ecx, %edx
+	movl	%edx, 4(%esp)           # 4-byte Spill
+	andl	4(%edi), %ecx
+	movl	16(%edi), %edx
+	andl	%ebx, %edx
+	movl	%edx, (%esp)            # 4-byte Spill
+	movl	8(%edi), %edx
+	andl	%ebx, %edx
+	andl	(%edi), %ebx
+	movl	36(%edi), %esi
+	andl	%eax, %esi
+	movl	%esi, 8(%esp)           # 4-byte Spill
+	movl	32(%edi), %ebp
+	andl	%eax, %ebp
+	movl	28(%edi), %esi
+	andl	%eax, %esi
+	andl	24(%edi), %eax
+# z = d + (p & mask), rippling the carry while storing each limb
+	addl	36(%esp), %ebx          # 4-byte Folded Reload
+	adcl	40(%esp), %ecx          # 4-byte Folded Reload
+	movl	76(%esp), %edi
+	movl	%ebx, (%edi)
+	adcl	16(%esp), %edx          # 4-byte Folded Reload
+	movl	%ecx, 4(%edi)
+	movl	4(%esp), %ecx           # 4-byte Reload
+	adcl	24(%esp), %ecx          # 4-byte Folded Reload
+	movl	%edx, 8(%edi)
+	movl	(%esp), %edx            # 4-byte Reload
+	adcl	52(%esp), %edx          # 4-byte Folded Reload
+	movl	%ecx, 12(%edi)
+	movl	12(%esp), %ecx          # 4-byte Reload
+	adcl	28(%esp), %ecx          # 4-byte Folded Reload
+	movl	%edx, 16(%edi)
+	adcl	32(%esp), %eax          # 4-byte Folded Reload
+	movl	%ecx, 20(%edi)
+	adcl	44(%esp), %esi          # 4-byte Folded Reload
+	movl	%eax, 24(%edi)
+	adcl	48(%esp), %ebp          # 4-byte Folded Reload
+	movl	%esi, 28(%edi)
+	movl	%ebp, 32(%edi)
+	movl	8(%esp), %eax           # 4-byte Reload
+	adcl	20(%esp), %eax          # 4-byte Folded Reload
+	movl	%eax, 36(%edi)
+	addl	$56, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end155:
+	.size	mcl_fp_subNF10L, .Lfunc_end155-mcl_fp_subNF10L
+
+# -----------------------------------------------------------------------------
+# mcl_fpDbl_add10L(uint32_t z[20], const uint32_t x[20], const uint32_t y[20],
+#                  const uint32_t p[10])
+# Double-width addition for 10-limb fields: z = x + y where x, y, z are
+# 20-limb (640-bit) values; the LOW 10 limbs are a plain add, the HIGH 10
+# limbs are reduced modulo p (trial-subtract p, select by borrow).
+# i386 cdecl; after prologue (4 pushes + 72-byte frame) stack args sit at
+#   92(%esp) = z, 96(%esp) = x, 100(%esp) = y, 104(%esp) = p.
+# NOTE(review): compiler-generated (LLVM) code; comments only, code untouched.
+# -----------------------------------------------------------------------------
+	.globl	mcl_fpDbl_add10L
+	.align	16, 0x90
+	.type	mcl_fpDbl_add10L,@function
+mcl_fpDbl_add10L:                       # @mcl_fpDbl_add10L
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$72, %esp
+	movl	100(%esp), %edx
+	movl	96(%esp), %edi
+	movl	12(%edi), %esi
+	movl	16(%edi), %ecx
+	movl	8(%edx), %ebx
+	movl	(%edx), %ebp
+# low half (limbs 0..9): straight add, stored to z as each limb finishes
+	addl	(%edi), %ebp
+	movl	92(%esp), %eax
+	movl	%ebp, (%eax)
+	movl	4(%edx), %ebp
+	adcl	4(%edi), %ebp
+	adcl	8(%edi), %ebx
+	adcl	12(%edx), %esi
+	adcl	16(%edx), %ecx
+	movl	%ebp, 4(%eax)
+	movl	48(%edx), %ebp
+	movl	%ebx, 8(%eax)
+	movl	20(%edx), %ebx
+	movl	%esi, 12(%eax)
+	movl	20(%edi), %esi
+	adcl	%ebx, %esi
+	movl	24(%edx), %ebx
+	movl	%ecx, 16(%eax)
+	movl	24(%edi), %ecx
+	adcl	%ebx, %ecx
+	movl	28(%edx), %ebx
+	movl	%esi, 20(%eax)
+	movl	28(%edi), %esi
+	adcl	%ebx, %esi
+	movl	32(%edx), %ebx
+	movl	%ecx, 24(%eax)
+	movl	32(%edi), %ecx
+	adcl	%ebx, %ecx
+	movl	36(%edx), %ebx
+	movl	%esi, 28(%eax)
+	movl	36(%edi), %esi
+	adcl	%ebx, %esi
+# high half (limbs 10..19): accumulate into spills, carry still live
+	movl	40(%edx), %ebx
+	movl	%ecx, 32(%eax)
+	movl	40(%edi), %ecx
+	adcl	%ebx, %ecx
+	movl	%ecx, 64(%esp)          # 4-byte Spill
+	movl	44(%edx), %ebx
+	movl	%esi, 36(%eax)
+	movl	44(%edi), %eax
+	adcl	%ebx, %eax
+	movl	%eax, 68(%esp)          # 4-byte Spill
+	movl	48(%edi), %eax
+	adcl	%ebp, %eax
+	movl	%eax, 52(%esp)          # 4-byte Spill
+	movl	52(%edx), %eax
+	movl	52(%edi), %ecx
+	adcl	%eax, %ecx
+	movl	%ecx, 56(%esp)          # 4-byte Spill
+	movl	56(%edx), %eax
+	movl	56(%edi), %ecx
+	adcl	%eax, %ecx
+	movl	%ecx, 60(%esp)          # 4-byte Spill
+	movl	60(%edx), %eax
+	movl	60(%edi), %ecx
+	adcl	%eax, %ecx
+	movl	64(%edx), %esi
+	movl	64(%edi), %eax
+	adcl	%esi, %eax
+	movl	%eax, 40(%esp)          # 4-byte Spill
+	movl	68(%edx), %ebx
+	movl	68(%edi), %esi
+	adcl	%ebx, %esi
+	movl	%esi, 44(%esp)          # 4-byte Spill
+	movl	72(%edx), %ebx
+	movl	72(%edi), %ebp
+	adcl	%ebx, %ebp
+	movl	%ebp, 48(%esp)          # 4-byte Spill
+	movl	76(%edx), %edx
+	movl	76(%edi), %edi
+	adcl	%edx, %edi
+	movl	%edi, 36(%esp)          # 4-byte Spill
+# capture the top carry as 0/1
+	sbbl	%edx, %edx
+	andl	$1, %edx
+# trial subtraction: (high half) - p
+	movl	104(%esp), %ebx
+	movl	64(%esp), %edi          # 4-byte Reload
+	subl	(%ebx), %edi
+	movl	%edi, 20(%esp)          # 4-byte Spill
+	movl	68(%esp), %edi          # 4-byte Reload
+	sbbl	4(%ebx), %edi
+	movl	%edi, 16(%esp)          # 4-byte Spill
+	movl	52(%esp), %edi          # 4-byte Reload
+	sbbl	8(%ebx), %edi
+	movl	%edi, 12(%esp)          # 4-byte Spill
+	movl	56(%esp), %edi          # 4-byte Reload
+	sbbl	12(%ebx), %edi
+	movl	%edi, 8(%esp)           # 4-byte Spill
+	movl	60(%esp), %edi          # 4-byte Reload
+	sbbl	16(%ebx), %edi
+	movl	%edi, 4(%esp)           # 4-byte Spill
+	movl	%ecx, %edi
+	sbbl	20(%ebx), %edi
+	movl	%edi, (%esp)            # 4-byte Spill
+	sbbl	24(%ebx), %eax
+	movl	%eax, 24(%esp)          # 4-byte Spill
+	sbbl	28(%ebx), %esi
+	movl	%esi, 28(%esp)          # 4-byte Spill
+	movl	%ebp, %eax
+	movl	36(%esp), %ebp          # 4-byte Reload
+	sbbl	32(%ebx), %eax
+	movl	%eax, 32(%esp)          # 4-byte Spill
+	movl	%ebp, %edi
+	sbbl	36(%ebx), %edi
+# fold the borrow against the saved carry; zero => reduced value is valid
+	sbbl	$0, %edx
+	andl	$1, %edx
+	jne	.LBB156_2
+# BB#1:
+	movl	%edi, %ebp
+.LBB156_2:
+	testb	%dl, %dl
+	movl	64(%esp), %edx          # 4-byte Reload
+	movl	60(%esp), %esi          # 4-byte Reload
+	movl	56(%esp), %edi          # 4-byte Reload
+	movl	52(%esp), %ebx          # 4-byte Reload
+	jne	.LBB156_4
+# BB#3:
+# select the reduced (subtracted) limbs for z[10..15]
+	movl	(%esp), %ecx            # 4-byte Reload
+	movl	4(%esp), %esi           # 4-byte Reload
+	movl	8(%esp), %edi           # 4-byte Reload
+	movl	12(%esp), %ebx          # 4-byte Reload
+	movl	16(%esp), %eax          # 4-byte Reload
+	movl	%eax, 68(%esp)          # 4-byte Spill
+	movl	20(%esp), %edx          # 4-byte Reload
+.LBB156_4:
+	movl	92(%esp), %eax
+	movl	%edx, 40(%eax)
+	movl	68(%esp), %edx          # 4-byte Reload
+	movl	%edx, 44(%eax)
+	movl	%ebx, 48(%eax)
+	movl	%edi, 52(%eax)
+	movl	%esi, 56(%eax)
+	movl	%ecx, 60(%eax)
+	movl	44(%esp), %edx          # 4-byte Reload
+	movl	40(%esp), %ecx          # 4-byte Reload
+	jne	.LBB156_6
+# BB#5:
+	movl	24(%esp), %ecx          # 4-byte Reload
+.LBB156_6:
+	movl	%ecx, 64(%eax)
+	movl	48(%esp), %ecx          # 4-byte Reload
+	jne	.LBB156_8
+# BB#7:
+	movl	28(%esp), %edx          # 4-byte Reload
+.LBB156_8:
+	movl	%edx, 68(%eax)
+	jne	.LBB156_10
+# BB#9:
+	movl	32(%esp), %ecx          # 4-byte Reload
+.LBB156_10:
+	movl	%ecx, 72(%eax)
+	movl	%ebp, 76(%eax)
+	addl	$72, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end156:
+	.size	mcl_fpDbl_add10L, .Lfunc_end156-mcl_fpDbl_add10L
+
+# -----------------------------------------------------------------------------
+# mcl_fpDbl_sub10L(uint32_t z[20], const uint32_t x[20], const uint32_t y[20],
+#                  const uint32_t p[10])
+# Double-width subtraction for 10-limb fields: z = x - y where x, y, z are
+# 20-limb (640-bit) values; the LOW 10 limbs are a plain subtract, and if the
+# full subtraction borrowed, p is added back into the HIGH 10 limbs.
+# i386 cdecl; after prologue (4 pushes + 60-byte frame) stack args sit at
+#   80(%esp) = z, 84(%esp) = x, 88(%esp) = y, 92(%esp) = p.
+# The borrow-conditional add of p is done by loading either p[i] or 0 into
+# each register via the jne/jmp ladder below, then one unconditional adcl
+# chain — i.e. z_hi += (borrow ? p : 0).
+# NOTE(review): compiler-generated (LLVM) code; comments only, code untouched.
+# -----------------------------------------------------------------------------
+	.globl	mcl_fpDbl_sub10L
+	.align	16, 0x90
+	.type	mcl_fpDbl_sub10L,@function
+mcl_fpDbl_sub10L:                       # @mcl_fpDbl_sub10L
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$60, %esp
+	movl	84(%esp), %ebp
+	movl	(%ebp), %edx
+	movl	4(%ebp), %esi
+	movl	88(%esp), %eax
+# low half (limbs 0..9): straight subtract, stored to z immediately
+	subl	(%eax), %edx
+	sbbl	4(%eax), %esi
+	movl	8(%ebp), %edi
+	sbbl	8(%eax), %edi
+	movl	80(%esp), %ecx
+	movl	%edx, (%ecx)
+	movl	12(%ebp), %edx
+	sbbl	12(%eax), %edx
+	movl	%esi, 4(%ecx)
+	movl	16(%ebp), %esi
+	sbbl	16(%eax), %esi
+	movl	%edi, 8(%ecx)
+	movl	20(%eax), %edi
+	movl	%edx, 12(%ecx)
+	movl	20(%ebp), %edx
+	sbbl	%edi, %edx
+	movl	24(%eax), %edi
+	movl	%esi, 16(%ecx)
+	movl	24(%ebp), %esi
+	sbbl	%edi, %esi
+	movl	28(%eax), %edi
+	movl	%edx, 20(%ecx)
+	movl	28(%ebp), %edx
+	sbbl	%edi, %edx
+	movl	32(%eax), %edi
+	movl	%esi, 24(%ecx)
+	movl	32(%ebp), %esi
+	sbbl	%edi, %esi
+	movl	36(%eax), %edi
+	movl	%edx, 28(%ecx)
+	movl	36(%ebp), %edx
+	sbbl	%edi, %edx
+# high half (limbs 10..19): differences spilled for the fix-up pass
+	movl	40(%eax), %edi
+	movl	%esi, 32(%ecx)
+	movl	40(%ebp), %esi
+	sbbl	%edi, %esi
+	movl	%esi, 28(%esp)          # 4-byte Spill
+	movl	44(%eax), %esi
+	movl	%edx, 36(%ecx)
+	movl	44(%ebp), %edx
+	sbbl	%esi, %edx
+	movl	%edx, 20(%esp)          # 4-byte Spill
+	movl	48(%eax), %edx
+	movl	48(%ebp), %esi
+	sbbl	%edx, %esi
+	movl	%esi, 24(%esp)          # 4-byte Spill
+	movl	52(%eax), %edx
+	movl	52(%ebp), %esi
+	sbbl	%edx, %esi
+	movl	%esi, 32(%esp)          # 4-byte Spill
+	movl	56(%eax), %edx
+	movl	56(%ebp), %esi
+	sbbl	%edx, %esi
+	movl	%esi, 36(%esp)          # 4-byte Spill
+	movl	60(%eax), %edx
+	movl	60(%ebp), %esi
+	sbbl	%edx, %esi
+	movl	%esi, 40(%esp)          # 4-byte Spill
+	movl	64(%eax), %edx
+	movl	64(%ebp), %esi
+	sbbl	%edx, %esi
+	movl	%esi, 44(%esp)          # 4-byte Spill
+	movl	68(%eax), %edx
+	movl	68(%ebp), %esi
+	sbbl	%edx, %esi
+	movl	%esi, 48(%esp)          # 4-byte Spill
+	movl	72(%eax), %edx
+	movl	72(%ebp), %esi
+	sbbl	%edx, %esi
+	movl	%esi, 52(%esp)          # 4-byte Spill
+	movl	76(%eax), %eax
+	movl	76(%ebp), %edx
+	sbbl	%eax, %edx
+	movl	%edx, 56(%esp)          # 4-byte Spill
+# %eax = final borrow (0 or 1)
+	movl	$0, %eax
+	sbbl	$0, %eax
+	andl	$1, %eax
+# load p[i] if borrow else 0, one limb per jne ladder step
+	movl	92(%esp), %esi
+	jne	.LBB157_1
+# BB#2:
+	movl	$0, 16(%esp)            # 4-byte Folded Spill
+	jmp	.LBB157_3
+.LBB157_1:
+	movl	36(%esi), %edx
+	movl	%edx, 16(%esp)          # 4-byte Spill
+.LBB157_3:
+	testb	%al, %al
+	jne	.LBB157_4
+# BB#5:
+	movl	$0, 8(%esp)             # 4-byte Folded Spill
+	movl	$0, %ebx
+	jmp	.LBB157_6
+.LBB157_4:
+	movl	(%esi), %ebx
+	movl	4(%esi), %eax
+	movl	%eax, 8(%esp)           # 4-byte Spill
+.LBB157_6:
+	jne	.LBB157_7
+# BB#8:
+	movl	$0, 12(%esp)            # 4-byte Folded Spill
+	jmp	.LBB157_9
+.LBB157_7:
+	movl	32(%esi), %eax
+	movl	%eax, 12(%esp)          # 4-byte Spill
+.LBB157_9:
+	jne	.LBB157_10
+# BB#11:
+	movl	$0, 4(%esp)             # 4-byte Folded Spill
+	jmp	.LBB157_12
+.LBB157_10:
+	movl	28(%esi), %eax
+	movl	%eax, 4(%esp)           # 4-byte Spill
+.LBB157_12:
+	jne	.LBB157_13
+# BB#14:
+	movl	$0, (%esp)              # 4-byte Folded Spill
+	jmp	.LBB157_15
+.LBB157_13:
+	movl	24(%esi), %eax
+	movl	%eax, (%esp)            # 4-byte Spill
+.LBB157_15:
+	jne	.LBB157_16
+# BB#17:
+	movl	$0, %ebp
+	jmp	.LBB157_18
+.LBB157_16:
+	movl	20(%esi), %ebp
+.LBB157_18:
+	jne	.LBB157_19
+# BB#20:
+	movl	$0, %eax
+	jmp	.LBB157_21
+.LBB157_19:
+	movl	16(%esi), %eax
+.LBB157_21:
+	jne	.LBB157_22
+# BB#23:
+	movl	$0, %edx
+	jmp	.LBB157_24
+.LBB157_22:
+	movl	12(%esi), %edx
+.LBB157_24:
+	jne	.LBB157_25
+# BB#26:
+	xorl	%esi, %esi
+	jmp	.LBB157_27
+.LBB157_25:
+	movl	8(%esi), %esi
+.LBB157_27:
+# z[10..19] = d_hi + (borrow ? p : 0), one adcl chain
+	addl	28(%esp), %ebx          # 4-byte Folded Reload
+	movl	8(%esp), %edi           # 4-byte Reload
+	adcl	20(%esp), %edi          # 4-byte Folded Reload
+	movl	%ebx, 40(%ecx)
+	adcl	24(%esp), %esi          # 4-byte Folded Reload
+	movl	%edi, 44(%ecx)
+	adcl	32(%esp), %edx          # 4-byte Folded Reload
+	movl	%esi, 48(%ecx)
+	adcl	36(%esp), %eax          # 4-byte Folded Reload
+	movl	%edx, 52(%ecx)
+	adcl	40(%esp), %ebp          # 4-byte Folded Reload
+	movl	%eax, 56(%ecx)
+	movl	(%esp), %eax            # 4-byte Reload
+	adcl	44(%esp), %eax          # 4-byte Folded Reload
+	movl	%ebp, 60(%ecx)
+	movl	4(%esp), %edx           # 4-byte Reload
+	adcl	48(%esp), %edx          # 4-byte Folded Reload
+	movl	%eax, 64(%ecx)
+	movl	12(%esp), %eax          # 4-byte Reload
+	adcl	52(%esp), %eax          # 4-byte Folded Reload
+	movl	%edx, 68(%ecx)
+	movl	%eax, 72(%ecx)
+	movl	16(%esp), %eax          # 4-byte Reload
+	adcl	56(%esp), %eax          # 4-byte Folded Reload
+	movl	%eax, 76(%ecx)
+	addl	$60, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end157:
+	.size	mcl_fpDbl_sub10L, .Lfunc_end157-mcl_fpDbl_sub10L
+
+# -----------------------------------------------------------------------------
+# .LmulPv352x32 — internal (file-local) helper, NOT cdecl.
+# Multiplies an 11-limb (352-bit) vector by a single 32-bit word:
+#   dst[0..11] = src[0..10] * w   (12 limbs out: 11 products + top carry).
+# Custom register convention observed at all call sites in this file:
+#   In:  %ecx = dst (12 dwords), %edx = src (11 dwords),
+#        w = dword at 92(%esp), i.e. the caller's (%esp) slot
+#        (4 pushes + 72-byte frame + return address = 92 bytes).
+#   Out: %eax = dst.  Clobbers all other general registers and flags.
+# Phase 1: the 11 `mull` products (each clobbers %edx:%eax) are computed
+# high-to-low and spilled; phase 2 sums low(i) + high(i-1) with one adcl
+# chain, storing each result limb and the final carry as dst[11].
+# NOTE(review): compiler-generated (LLVM) code; comments only, code untouched.
+# -----------------------------------------------------------------------------
+	.align	16, 0x90
+	.type	.LmulPv352x32,@function
+.LmulPv352x32:                          # @mulPv352x32
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$72, %esp
+	movl	%edx, %ebx
+	movl	92(%esp), %edi
+# phase 1: 32x32->64 products, highest limb first, spilled as (hi, lo) pairs
+	movl	%edi, %eax
+	mull	40(%ebx)
+	movl	%edx, 68(%esp)          # 4-byte Spill
+	movl	%eax, 64(%esp)          # 4-byte Spill
+	movl	%edi, %eax
+	mull	36(%ebx)
+	movl	%edx, 60(%esp)          # 4-byte Spill
+	movl	%eax, 56(%esp)          # 4-byte Spill
+	movl	%edi, %eax
+	mull	32(%ebx)
+	movl	%edx, 52(%esp)          # 4-byte Spill
+	movl	%eax, 48(%esp)          # 4-byte Spill
+	movl	%edi, %eax
+	mull	28(%ebx)
+	movl	%edx, 44(%esp)          # 4-byte Spill
+	movl	%eax, 40(%esp)          # 4-byte Spill
+	movl	%edi, %eax
+	mull	24(%ebx)
+	movl	%edx, 36(%esp)          # 4-byte Spill
+	movl	%eax, 32(%esp)          # 4-byte Spill
+	movl	%edi, %eax
+	mull	20(%ebx)
+	movl	%edx, 28(%esp)          # 4-byte Spill
+	movl	%eax, 24(%esp)          # 4-byte Spill
+	movl	%edi, %eax
+	mull	16(%ebx)
+	movl	%edx, 20(%esp)          # 4-byte Spill
+	movl	%eax, 16(%esp)          # 4-byte Spill
+	movl	%edi, %eax
+	mull	12(%ebx)
+	movl	%edx, 12(%esp)          # 4-byte Spill
+	movl	%eax, 8(%esp)           # 4-byte Spill
+	movl	%edi, %eax
+	mull	8(%ebx)
+	movl	%edx, %esi
+	movl	%eax, 4(%esp)           # 4-byte Spill
+	movl	%edi, %eax
+	mull	4(%ebx)
+	movl	%edx, %ebp
+	movl	%eax, (%esp)            # 4-byte Spill
+	movl	%edi, %eax
+	mull	(%ebx)
+# phase 2: dst[i] = lo(i) + hi(i-1), carries rippled by adcl
+	movl	%eax, (%ecx)
+	addl	(%esp), %edx            # 4-byte Folded Reload
+	movl	%edx, 4(%ecx)
+	adcl	4(%esp), %ebp           # 4-byte Folded Reload
+	movl	%ebp, 8(%ecx)
+	adcl	8(%esp), %esi           # 4-byte Folded Reload
+	movl	%esi, 12(%ecx)
+	movl	12(%esp), %eax          # 4-byte Reload
+	adcl	16(%esp), %eax          # 4-byte Folded Reload
+	movl	%eax, 16(%ecx)
+	movl	20(%esp), %eax          # 4-byte Reload
+	adcl	24(%esp), %eax          # 4-byte Folded Reload
+	movl	%eax, 20(%ecx)
+	movl	28(%esp), %eax          # 4-byte Reload
+	adcl	32(%esp), %eax          # 4-byte Folded Reload
+	movl	%eax, 24(%ecx)
+	movl	36(%esp), %eax          # 4-byte Reload
+	adcl	40(%esp), %eax          # 4-byte Folded Reload
+	movl	%eax, 28(%ecx)
+	movl	44(%esp), %eax          # 4-byte Reload
+	adcl	48(%esp), %eax          # 4-byte Folded Reload
+	movl	%eax, 32(%ecx)
+	movl	52(%esp), %eax          # 4-byte Reload
+	adcl	56(%esp), %eax          # 4-byte Folded Reload
+	movl	%eax, 36(%ecx)
+	movl	60(%esp), %eax          # 4-byte Reload
+	adcl	64(%esp), %eax          # 4-byte Folded Reload
+	movl	%eax, 40(%ecx)
+# dst[11] = top carry-out
+	movl	68(%esp), %eax          # 4-byte Reload
+	adcl	$0, %eax
+	movl	%eax, 44(%ecx)
+	movl	%ecx, %eax
+	addl	$72, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end158:
+	.size	.LmulPv352x32, .Lfunc_end158-.LmulPv352x32
+
+# -----------------------------------------------------------------------------
+# mcl_fp_mulUnitPre11L(uint32_t z[12], const uint32_t x[11], uint32_t y)
+# 352-bit x 32-bit multiply without reduction: z[0..11] = x[0..10] * y.
+# Thin cdecl wrapper over the internal .LmulPv352x32 helper: sets up the
+# PIC GOT base in %ebx (call/pop idiom required for i386 PIC), marshals the
+# helper's custom convention (scalar in the (%esp) slot, %ecx = scratch
+# result buffer at 40(%esp), %edx = x), then copies the 12 result dwords
+# from the local buffer out to z.
+# i386 cdecl; after prologue (4 pushes + 92-byte frame) stack args sit at
+#   112(%esp) = z, 116(%esp) = x, 120(%esp) = y.
+# NOTE(review): compiler-generated (LLVM) code; comments only, code untouched.
+# -----------------------------------------------------------------------------
+	.globl	mcl_fp_mulUnitPre11L
+	.align	16, 0x90
+	.type	mcl_fp_mulUnitPre11L,@function
+mcl_fp_mulUnitPre11L:                   # @mcl_fp_mulUnitPre11L
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$92, %esp
+# PIC: load address of GOT into %ebx via call/pop
+	calll	.L159$pb
+.L159$pb:
+	popl	%ebx
+.Ltmp20:
+	addl	$_GLOBAL_OFFSET_TABLE_+(.Ltmp20-.L159$pb), %ebx
+	movl	120(%esp), %eax
+	movl	%eax, (%esp)
+	leal	40(%esp), %ecx
+	movl	116(%esp), %edx
+	calll	.LmulPv352x32
+# spill result limbs 5..11 (limbs 0..5 go straight into registers below)
+	movl	84(%esp), %eax
+	movl	%eax, 36(%esp)          # 4-byte Spill
+	movl	80(%esp), %eax
+	movl	%eax, 32(%esp)          # 4-byte Spill
+	movl	76(%esp), %eax
+	movl	%eax, 28(%esp)          # 4-byte Spill
+	movl	72(%esp), %eax
+	movl	%eax, 24(%esp)          # 4-byte Spill
+	movl	68(%esp), %eax
+	movl	%eax, 20(%esp)          # 4-byte Spill
+	movl	64(%esp), %eax
+	movl	%eax, 16(%esp)          # 4-byte Spill
+	movl	60(%esp), %ebp
+	movl	56(%esp), %ebx
+	movl	52(%esp), %edi
+	movl	48(%esp), %esi
+	movl	40(%esp), %edx
+	movl	44(%esp), %ecx
+# copy the 12-dword product to z
+	movl	112(%esp), %eax
+	movl	%edx, (%eax)
+	movl	%ecx, 4(%eax)
+	movl	%esi, 8(%eax)
+	movl	%edi, 12(%eax)
+	movl	%ebx, 16(%eax)
+	movl	%ebp, 20(%eax)
+	movl	16(%esp), %ecx          # 4-byte Reload
+	movl	%ecx, 24(%eax)
+	movl	20(%esp), %ecx          # 4-byte Reload
+	movl	%ecx, 28(%eax)
+	movl	24(%esp), %ecx          # 4-byte Reload
+	movl	%ecx, 32(%eax)
+	movl	28(%esp), %ecx          # 4-byte Reload
+	movl	%ecx, 36(%eax)
+	movl	32(%esp), %ecx          # 4-byte Reload
+	movl	%ecx, 40(%eax)
+	movl	36(%esp), %ecx          # 4-byte Reload
+	movl	%ecx, 44(%eax)
+	addl	$92, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end159:
+	.size	mcl_fp_mulUnitPre11L, .Lfunc_end159-mcl_fp_mulUnitPre11L
+
+ #-----------------------------------------------------------------------
+ # mcl_fpDbl_mulPre11L -- i386 cdecl, PIC (LLVM-generated)
+ # Schoolbook full product of two 11x32-bit operands: for k = 0..10 it
+ # calls the local helper .LmulPv352x32 to compute the 12-limb partial
+ # product x * y[k] on the stack, then folds it into the running
+ # accumulator with adcl carry chains, emitting one low limb of the
+ # 22-limb result per round; the final round writes limbs 10..21.
+ # NOTE(review): stack roles inferred from the visible uses after the
+ # 4 pushes + 620-byte frame -- confirm against the mcl sources:
+ #   640(%esp) = dst z (22 limbs)
+ #   644(%esp) = src x (passed to the helper in %edx)
+ #   648(%esp) = src y (y[k] passed to the helper at (%esp))
+ # WARNING: the adcl chains depend on exact instruction order and on the
+ # carry flag surviving between instructions -- do not reorder anything.
+ .globl mcl_fpDbl_mulPre11L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre11L,@function
+mcl_fpDbl_mulPre11L: # @mcl_fpDbl_mulPre11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $620, %esp # imm = 0x26C
+ # materialize the GOT base (PIC call/pop idiom); kept spilled at 84(%esp)
+ calll .L160$pb
+.L160$pb:
+ popl %eax
+.Ltmp21:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp21-.L160$pb), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ # ---- k=0: partial product x*y[0]; z[0] stored below ----
+ movl 648(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 644(%esp), %edx
+ movl %edx, %ebp
+ movl %ebx, %edi
+ calll .LmulPv352x32
+ movl 612(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 588(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 584(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 580(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 576(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 568(%esp), %eax
+ movl 572(%esp), %esi
+ movl 640(%esp), %ecx
+ movl %eax, (%ecx)
+ # ---- k=1: acc += x*y[1]; z[1] stored below ----
+ movl 648(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 520(%esp), %ecx
+ movl %ebp, %edx
+ movl %edi, %ebx
+ calll .LmulPv352x32
+ addl 520(%esp), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 564(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 560(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 540(%esp), %ebx
+ movl 536(%esp), %edi
+ movl 532(%esp), %esi
+ movl 524(%esp), %ecx
+ movl 528(%esp), %edx
+ movl 640(%esp), %eax
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ # ---- k=2: acc += x*y[2]; z[2] stored below ----
+ movl 648(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 472(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 516(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 512(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 508(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 504(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 492(%esp), %ebp
+ movl 488(%esp), %edi
+ movl 484(%esp), %esi
+ movl 476(%esp), %ecx
+ movl 480(%esp), %edx
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%eax)
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ # ---- k=3: acc += x*y[3]; z[3] stored below ----
+ movl 648(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 424(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 468(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 464(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 460(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 456(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 452(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 448(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 444(%esp), %ebx
+ movl 440(%esp), %edi
+ movl 436(%esp), %esi
+ movl 428(%esp), %ecx
+ movl 432(%esp), %edx
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ # ---- k=4: acc += x*y[4]; z[4] stored below ----
+ movl 648(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 376(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 396(%esp), %ebp
+ movl 392(%esp), %edi
+ movl 388(%esp), %esi
+ movl 380(%esp), %ecx
+ movl 384(%esp), %edx
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ # ---- k=5: acc += x*y[5]; z[5] stored below ----
+ movl 648(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 328(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 360(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 356(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 352(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 348(%esp), %ebx
+ movl 344(%esp), %edi
+ movl 340(%esp), %esi
+ movl 332(%esp), %ecx
+ movl 336(%esp), %edx
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ # ---- k=6: acc += x*y[6]; z[6] stored below ----
+ movl 648(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 280(%esp), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 316(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 304(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 300(%esp), %ebp
+ movl 296(%esp), %edi
+ movl 292(%esp), %esi
+ movl 284(%esp), %ecx
+ movl 288(%esp), %edx
+ movl 640(%esp), %eax
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ # ---- k=7: acc += x*y[7]; z[7] stored below ----
+ movl 648(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 232(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 272(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 252(%esp), %ebx
+ movl 248(%esp), %edi
+ movl 244(%esp), %esi
+ movl 236(%esp), %ecx
+ movl 240(%esp), %edx
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, %ebp
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ # ---- k=8: acc += x*y[8]; z[8] stored below ----
+ movl 648(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 184(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 228(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 224(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 220(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 204(%esp), %ebp
+ movl 200(%esp), %edi
+ movl 196(%esp), %esi
+ movl 188(%esp), %ecx
+ movl 192(%esp), %edx
+ movl 640(%esp), %eax
+ movl 32(%esp), %ebx # 4-byte Reload
+ movl %ebx, 32(%eax)
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ # ---- k=9: acc += x*y[9]; z[9] stored below ----
+ movl 648(%esp), %edi
+ movl 36(%edi), %eax
+ movl %eax, (%esp)
+ leal 136(%esp), %ecx
+ movl 644(%esp), %eax
+ movl %eax, %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 136(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 164(%esp), %ebp
+ movl 160(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 156(%esp), %edi
+ movl 152(%esp), %esi
+ movl 148(%esp), %edx
+ movl 140(%esp), %ecx
+ movl 144(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 640(%esp), %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ movl %ebx, 36(%eax)
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ # ---- k=10: last partial product x*y[10] ----
+ movl 648(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 88(%esp), %ecx
+ movl 644(%esp), %edx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ # fold in the final partial product and write result limbs z[10..21]
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 88(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 92(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 128(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 120(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 116(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 112(%esp), %edi
+ movl 108(%esp), %esi
+ movl 104(%esp), %edx
+ movl 100(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 40(%eax)
+ movl %ebp, 44(%eax)
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 48(%eax)
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 52(%eax)
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 56(%eax)
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 60(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 72(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 64(%eax)
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 72(%eax)
+ movl %ecx, 76(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%eax)
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx # propagate the final carry into the top limb
+ movl %ecx, 84(%eax)
+ addl $620, %esp # imm = 0x26C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end160:
+ .size mcl_fpDbl_mulPre11L, .Lfunc_end160-mcl_fpDbl_mulPre11L
+
+ .globl mcl_fpDbl_sqrPre11L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre11L,@function
+mcl_fpDbl_sqrPre11L: # @mcl_fpDbl_sqrPre11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $620, %esp # imm = 0x26C
+ calll .L161$pb
+.L161$pb:
+ popl %ebx
+.Ltmp22:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp22-.L161$pb), %ebx
+ movl %ebx, 84(%esp) # 4-byte Spill
+ movl 644(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl %edx, %esi
+ movl %ebx, %edi
+ calll .LmulPv352x32
+ movl 612(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 588(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 584(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 580(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 576(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 568(%esp), %eax
+ movl 572(%esp), %ebp
+ movl 640(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %esi, %edx
+ movl 4(%edx), %eax
+ movl %eax, (%esp)
+ leal 520(%esp), %ecx
+ movl %edi, %ebx
+ calll .LmulPv352x32
+ addl 520(%esp), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 564(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 560(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 540(%esp), %ebx
+ movl 536(%esp), %edi
+ movl 532(%esp), %esi
+ movl 524(%esp), %ecx
+ movl 528(%esp), %edx
+ movl 640(%esp), %eax
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 4(%eax)
+ adcl 60(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 472(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 516(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 512(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 508(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 504(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 492(%esp), %ebp
+ movl 488(%esp), %edi
+ movl 484(%esp), %esi
+ movl 476(%esp), %ecx
+ movl 480(%esp), %edx
+ movl 640(%esp), %eax
+ movl 60(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 48(%esp), %eax # 4-byte Reload
+ addl 424(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 468(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 464(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 460(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 456(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 452(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 448(%esp), %ebx
+ movl 444(%esp), %edi
+ movl 440(%esp), %esi
+ movl 436(%esp), %edx
+ movl 428(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 432(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 80(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 80(%esp), %eax # 4-byte Reload
+ addl 376(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 404(%esp), %ebx
+ movl 400(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 396(%esp), %edi
+ movl 392(%esp), %esi
+ movl 388(%esp), %edx
+ movl 380(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 384(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 80(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 20(%esp), %ebp # 4-byte Folded Reload
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 328(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 360(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 356(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 352(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 348(%esp), %ebp
+ movl 344(%esp), %edi
+ movl 340(%esp), %esi
+ movl 332(%esp), %ecx
+ movl 336(%esp), %edx
+ movl 640(%esp), %eax
+ movl 48(%esp), %ebx # 4-byte Reload
+ movl %ebx, 20(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ adcl 80(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, 24(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 48(%esp), %eax # 4-byte Reload
+ addl 280(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 316(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 304(%esp), %ebx
+ movl 300(%esp), %edi
+ movl 296(%esp), %esi
+ movl 292(%esp), %edx
+ movl 284(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 288(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 80(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 80(%esp), %eax # 4-byte Reload
+ addl 232(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 272(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 260(%esp), %ebx
+ movl 256(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 252(%esp), %edi
+ movl 248(%esp), %esi
+ movl 244(%esp), %edx
+ movl 236(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 240(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 80(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 184(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 228(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 224(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 220(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 204(%esp), %ebp
+ movl 200(%esp), %edi
+ movl 196(%esp), %esi
+ movl 188(%esp), %ecx
+ movl 192(%esp), %edx
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 32(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 20(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, 28(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 36(%edx), %eax
+ movl %eax, (%esp)
+ leal 136(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 136(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 164(%esp), %ebp
+ movl 160(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 156(%esp), %edi
+ movl 152(%esp), %esi
+ movl 148(%esp), %edx
+ movl 140(%esp), %ecx
+ movl 144(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 36(%eax)
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 12(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 64(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 644(%esp), %edx
+ movl 40(%edx), %eax
+ movl %eax, (%esp)
+ leal 88(%esp), %ecx
+ movl 84(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 88(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 92(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 132(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 128(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 120(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 116(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 112(%esp), %edi
+ movl 108(%esp), %esi
+ movl 104(%esp), %edx
+ movl 100(%esp), %ecx
+ movl 640(%esp), %eax
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 40(%eax)
+ movl %ebp, 44(%eax)
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 48(%eax)
+ adcl 36(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 52(%eax)
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 56(%eax)
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 60(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 64(%eax)
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 72(%eax)
+ movl %ecx, 76(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 80(%eax)
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 84(%eax)
+ addl $620, %esp # imm = 0x26C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end161:
+ .size mcl_fpDbl_sqrPre11L, .Lfunc_end161-mcl_fpDbl_sqrPre11L
+
+ .globl mcl_fp_mont11L
+ .align 16, 0x90
+ .type mcl_fp_mont11L,@function
+mcl_fp_mont11L: # @mcl_fp_mont11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1132, %esp # imm = 0x46C
+ calll .L162$pb
+.L162$pb:
+ popl %ebx
+.Ltmp23:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp23-.L162$pb), %ebx
+ movl 1164(%esp), %eax
+ movl -4(%eax), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1080(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 1080(%esp), %edi
+ movl 1084(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %ebp, %eax
+ movl 1124(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1120(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1116(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1112(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1108(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 1100(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 1096(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1092(%esp), %esi
+ movl 1088(%esp), %ebp
+ movl %eax, (%esp)
+ leal 1032(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 1032(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1040(%esp), %ebp
+ adcl 1044(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 1076(%esp), %esi
+ sbbl %edi, %edi
+ movl 1160(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 984(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 984(%esp), %ecx
+ adcl 988(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1024(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl 1028(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 936(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 936(%esp), %esi
+ adcl 940(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 964(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 980(%esp), %esi
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ addl 888(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 912(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 928(%esp), %esi
+ movl %esi, %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ebp, %eax
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 840(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ movl %esi, %eax
+ andl $1, %eax
+ addl 840(%esp), %ebp
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 844(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 848(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 852(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 856(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 860(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 864(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 868(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 872(%esp), %ebp
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 876(%esp), %esi
+ adcl 880(%esp), %edi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 884(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 792(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 792(%esp), %ecx
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 820(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ adcl 824(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl 828(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 836(%esp), %esi
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 744(%esp), %ecx
+ movl 1164(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 744(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 768(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 776(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 788(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 1156(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 696(%esp), %ecx
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 716(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 724(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 728(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 648(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 648(%esp), %ebp
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl 656(%esp), %ebp
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 676(%esp), %edi
+ adcl 680(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 692(%esp), %esi
+ adcl $0, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 600(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 24(%esp), %ecx # 4-byte Reload
+ addl 600(%esp), %ecx
+ adcl 604(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 608(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 624(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 640(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %esi
+ movl %esi, %eax
+ addl 552(%esp), %edi
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 556(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl %ebp, %edi
+ adcl 560(%esp), %edi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 564(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 568(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 572(%esp), %esi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 576(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 580(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 584(%esp), %ebp
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 588(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 592(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 596(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 28(%esp), %ecx # 4-byte Reload
+ addl 504(%esp), %ecx
+ adcl 508(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 520(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 532(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 536(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 456(%esp), %ecx
+ movl 1164(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 456(%esp), %edi
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 460(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 464(%esp), %ebp
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 468(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 472(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 484(%esp), %edi
+ adcl 488(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 492(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 496(%esp), %esi
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 408(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 32(%esp), %ecx # 4-byte Reload
+ addl 408(%esp), %ecx
+ adcl 412(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 428(%esp), %ebp
+ adcl 432(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 444(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 360(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 360(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 368(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 380(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 384(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 392(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 312(%esp), %ecx
+ adcl 316(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 332(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 340(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl 348(%esp), %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 264(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 276(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 284(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 300(%esp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl 304(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 216(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 216(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 224(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 232(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ adcl 252(%esp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ movl %esi, %ecx
+ andl $1, %ecx
+ addl 168(%esp), %ebp
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 172(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 184(%esp), %ebp
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 188(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl %esi, %ecx
+ addl 120(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 128(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 132(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 136(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 156(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl 20(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 72(%esp), %ecx
+ movl 1164(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ andl $1, %esi
+ addl 72(%esp), %edi
+ movl 48(%esp), %edi # 4-byte Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl 88(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 112(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl 1164(%esp), %ebp
+ subl (%ebp), %eax
+ movl %ecx, %edx
+ sbbl 4(%ebp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 8(%ebp), %ecx
+ sbbl 12(%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 24(%esp), %edi # 4-byte Reload
+ sbbl 24(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 28(%esp), %ebx # 4-byte Reload
+ sbbl 28(%ebp), %ebx
+ movl 32(%esp), %edi # 4-byte Reload
+ sbbl 32(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ sbbl 36(%ebp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 40(%ebp), %edi
+ movl %edi, %ebp
+ sbbl $0, %esi
+ andl $1, %esi
+ jne .LBB162_2
+# BB#1:
+ movl %ebx, 28(%esp) # 4-byte Spill
+.LBB162_2:
+ movl %esi, %ebx
+ testb %bl, %bl
+ movl 68(%esp), %ebx # 4-byte Reload
+ jne .LBB162_4
+# BB#3:
+ movl %eax, %ebx
+.LBB162_4:
+ movl 1152(%esp), %eax
+ movl %ebx, (%eax)
+ movl 56(%esp), %edi # 4-byte Reload
+ jne .LBB162_6
+# BB#5:
+ movl %edx, %edi
+.LBB162_6:
+ movl %edi, 4(%eax)
+ movl 52(%esp), %edx # 4-byte Reload
+ jne .LBB162_8
+# BB#7:
+ movl %ecx, %edx
+.LBB162_8:
+ movl %edx, 8(%eax)
+ jne .LBB162_10
+# BB#9:
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%esp) # 4-byte Spill
+.LBB162_10:
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ jne .LBB162_12
+# BB#11:
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB162_12:
+ movl %ecx, 16(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ jne .LBB162_14
+# BB#13:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB162_14:
+ movl %ecx, 20(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ jne .LBB162_16
+# BB#15:
+ movl 16(%esp), %ecx # 4-byte Reload
+.LBB162_16:
+ movl %ecx, 24(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ jne .LBB162_18
+# BB#17:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB162_18:
+ movl %ecx, 32(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB162_20
+# BB#19:
+ movl 60(%esp), %ecx # 4-byte Reload
+.LBB162_20:
+ movl %ecx, 36(%eax)
+ movl 64(%esp), %ecx # 4-byte Reload
+ jne .LBB162_22
+# BB#21:
+ movl %ebp, %ecx
+.LBB162_22:
+ movl %ecx, 40(%eax)
+ addl $1132, %esp # imm = 0x46C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end162:
+ .size mcl_fp_mont11L, .Lfunc_end162-mcl_fp_mont11L
+
+#-----------------------------------------------------------------------
+# mcl_fp_montNF11L — Montgomery multiplication, 11 x 32-bit limbs (352 bits).
+# Compiler-generated (LLVM) i386 PIC code; do not hand-edit the instruction
+# sequence, only regenerate from the mcl sources.
+#
+# cdecl stack arguments (offsets valid after the 4 pushes + subl $1132 below):
+#   1152(%esp) = z   out: 11-limb result
+#   1156(%esp) = x   in:  11-limb multiplicand
+#   1160(%esp) = y   in:  11-limb multiplier
+#   1164(%esp) = p   in:  11-limb modulus; -4(p) holds -p^-1 mod 2^32
+# Helper .LmulPv352x32 multiplies the 11-limb vector at %edx by the 32-bit
+# scalar at (%esp), writing a 12-limb product at the buffer %ecx points to
+# (PIC: %ebx must hold the GOT base at each call).
+#
+# "NF" variant: the final modular correction selects between t and t-p by
+# the sign of the top borrow word (sarl $31 / js) instead of a carry mask.
+#-----------------------------------------------------------------------
+ .globl mcl_fp_montNF11L
+ .align 16, 0x90
+ .type mcl_fp_montNF11L,@function
+mcl_fp_montNF11L: # @mcl_fp_montNF11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1132, %esp # imm = 0x46C
+# Materialize the GOT base in %ebx for the PIC calls to .LmulPv352x32.
+ calll .L163$pb
+.L163$pb:
+ popl %ebx
+.Ltmp24:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp24-.L163$pb), %ebx
+# Load inv = -p^-1 mod 2^32 from -4(p) and keep it spilled at 24(%esp).
+ movl 1164(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+# Iteration 0: t = x * y[0] (product buffer at 1080(%esp)).
+ movl 1160(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1080(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 1080(%esp), %ebp
+ movl 1084(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+# m = t[0] * inv mod 2^32; then t += m*p so t[0] becomes 0 (Montgomery step).
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 1124(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1120(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1116(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1112(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1108(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1100(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 1096(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1092(%esp), %esi
+ movl 1088(%esp), %edi
+ movl %eax, (%esp)
+ leal 1032(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 1032(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1040(%esp), %edi
+ adcl 1044(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 1048(%esp), %ebp
+ movl 28(%esp), %esi # 4-byte Reload
+ adcl 1052(%esp), %esi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+# Iteration 1: t = (t >> 32) + x*y[1]; then m = t[0]*inv, t += m*p.
+# Iterations 2..10 below repeat this pattern with fresh product buffers,
+# walking y[8], y[12], ... (byte offsets of y's limbs).
+ movl 1160(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 984(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 1028(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 984(%esp), %ecx
+ adcl 988(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 996(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl 1000(%esp), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 1004(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 936(%esp), %ecx
+ movl 1164(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ addl 936(%esp), %ebp
+ adcl 940(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 956(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 960(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 980(%esp), %ebp
+ movl 1160(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 932(%esp), %eax
+ addl 888(%esp), %edi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 892(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 896(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 900(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 904(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl 908(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 912(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 916(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 920(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 924(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 928(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %edi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 840(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 840(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 860(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 872(%esp), %edi
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 876(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 884(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 792(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 836(%esp), %eax
+ movl 44(%esp), %edx # 4-byte Reload
+ addl 792(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 796(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 800(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 804(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 808(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 812(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 816(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 820(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 824(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 828(%esp), %ebp
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 832(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 744(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 744(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 764(%esp), %esi
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 768(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 780(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 784(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 740(%esp), %edx
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 696(%esp), %eax
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 704(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 708(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 712(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 716(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 720(%esp), %edi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 724(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 728(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 732(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 736(%esp), %esi
+ adcl $0, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 648(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 648(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 656(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 672(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 688(%esp), %esi
+ movl %esi, %edi
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 692(%esp), %esi
+ movl 1160(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 600(%esp), %ecx
+ movl 1156(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv352x32
+ movl 644(%esp), %eax
+ movl 28(%esp), %ecx # 4-byte Reload
+ addl 600(%esp), %ecx
+ adcl 604(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 608(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 612(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 616(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 620(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 624(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 628(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 632(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ adcl 636(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ adcl 640(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ adcl $0, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 552(%esp), %esi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 560(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 576(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 596(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 1160(%esp), %ecx
+ movl %ecx, %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 548(%esp), %edx
+ movl 32(%esp), %eax # 4-byte Reload
+ addl 504(%esp), %eax
+ adcl 508(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 512(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 516(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 520(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 524(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 528(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 532(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 536(%esp), %edi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 540(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 544(%esp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl %eax, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 456(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 456(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 468(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 480(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 488(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %edi # 4-byte Reload
+ adcl 496(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 408(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 452(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 408(%esp), %ecx
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 412(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 428(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 444(%esp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 448(%esp), %edi
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 360(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 360(%esp), %esi
+ adcl 364(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 372(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 392(%esp), %ebp
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ adcl 400(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 356(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 312(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 320(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 332(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 340(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 264(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 276(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl 284(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 292(%esp), %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1160(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 216(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 260(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 216(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 224(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 232(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 240(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ adcl 244(%esp), %ebp
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 168(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 176(%esp), %esi
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 180(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 196(%esp), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 204(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+# Last iteration: i = 10, multiply by y[10] (byte offset 40).
+ movl 1160(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 1156(%esp), %edx
+ calll .LmulPv352x32
+ movl 164(%esp), %edx
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 120(%esp), %ecx
+ adcl 124(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl 128(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 136(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 152(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 156(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+# Final reduction step for the last limb.
+ movl 24(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 72(%esp), %ecx
+ movl 1164(%esp), %edx
+ calll .LmulPv352x32
+ addl 72(%esp), %edi
+ movl 48(%esp), %edi # 4-byte Reload
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 84(%esp), %edi
+ adcl 88(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 112(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+# Compute t - p into scratch (borrow chain via subl/sbbl).
+ movl %eax, %edx
+ movl 1164(%esp), %ebx
+ subl (%ebx), %edx
+ movl %ecx, %esi
+ sbbl 4(%ebx), %esi
+ movl %edi, %ecx
+ sbbl 8(%ebx), %ecx
+ movl 44(%esp), %eax # 4-byte Reload
+ sbbl 12(%ebx), %eax
+ movl 40(%esp), %ebp # 4-byte Reload
+ sbbl 16(%ebx), %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ sbbl 20(%ebx), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ sbbl 24(%ebx), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ sbbl 28(%ebx), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ sbbl 32(%ebx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ sbbl 36(%ebx), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ sbbl 40(%ebx), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+# Sign of the top word of (t - p) picks the result: negative -> keep t,
+# otherwise use t - p. Each js/branch writes one output limb of z.
+ movl %ebp, %ebx
+ sarl $31, %ebx
+ testl %ebx, %ebx
+ movl 68(%esp), %ebx # 4-byte Reload
+ js .LBB163_2
+# BB#1:
+ movl %edx, %ebx
+.LBB163_2:
+ movl 1152(%esp), %edx
+ movl %ebx, (%edx)
+ movl 60(%esp), %ebp # 4-byte Reload
+ js .LBB163_4
+# BB#3:
+ movl %esi, %ebp
+.LBB163_4:
+ movl %ebp, 4(%edx)
+ js .LBB163_6
+# BB#5:
+ movl %ecx, %edi
+.LBB163_6:
+ movl %edi, 8(%edx)
+ movl 44(%esp), %ecx # 4-byte Reload
+ js .LBB163_8
+# BB#7:
+ movl %eax, %ecx
+.LBB163_8:
+ movl %ecx, 12(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB163_10
+# BB#9:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB163_10:
+ movl %eax, 16(%edx)
+ movl 28(%esp), %eax # 4-byte Reload
+ js .LBB163_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB163_12:
+ movl %eax, 20(%edx)
+ movl 32(%esp), %eax # 4-byte Reload
+ js .LBB163_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB163_14:
+ movl %eax, 24(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB163_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB163_16:
+ movl %eax, 28(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB163_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB163_18:
+ movl %eax, 32(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB163_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB163_20:
+ movl %eax, 36(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB163_22
+# BB#21:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB163_22:
+ movl %eax, 40(%edx)
+ addl $1132, %esp # imm = 0x46C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end163:
+ .size mcl_fp_montNF11L, .Lfunc_end163-mcl_fp_montNF11L
+
+ .globl mcl_fp_montRed11L
+ .align 16, 0x90
+ .type mcl_fp_montRed11L,@function
+#-----------------------------------------------------------------------
+# mcl_fp_montRed11L(uint32_t *r, uint32_t *xy, const uint32_t *p)
+# Montgomery reduction of a 22-limb (704-bit) product xy to an 11-limb
+# (352-bit) residue modulo p, stored to r.
+# Stack args (after 4 pushes + subl $668): 688(%esp)=r, 692(%esp)=xy,
+# 696(%esp)=p.  -4(p) holds p' = -p^-1 mod 2^32 (Montgomery constant).
+# PIC: loads _GLOBAL_OFFSET_TABLE_ into %ebx before each call to the
+# helper .LmulPv352x32 (11x32-bit vector multiply, result on stack).
+# Performs 11 reduction rounds: each round computes m = u0*p' mod 2^32,
+# adds m*p (via .LmulPv352x32) and drops the low limb; finally does a
+# conditional subtraction of p.  NOTE(review): generated by LLVM from
+# C++ templates; do not hand-edit the carry chains.
+#-----------------------------------------------------------------------
+mcl_fp_montRed11L: # @mcl_fp_montRed11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $668, %esp # imm = 0x29C
+# PIC prologue: materialize GOT address into %eax (spilled to 72(%esp)).
+ calll .L164$pb
+.L164$pb:
+ popl %eax
+.Ltmp25:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp25-.L164$pb), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+# Load p' = -4(p) and the first product limb; m0 = xy[0] * p'.
+ movl 696(%esp), %edx
+ movl -4(%edx), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 692(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 4(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ imull %esi, %ebx
+# Cache all 22 limbs of the double-width input xy in spill slots/regs.
+ movl 84(%ecx), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 80(%ecx), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 76(%ecx), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 68(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 60(%ecx), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 48(%ecx), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 40(%ecx), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 32(%ecx), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 28(%ecx), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 24(%ecx), %ebp
+ movl 20(%ecx), %edi
+ movl 16(%ecx), %esi
+ movl 12(%ecx), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 8(%ecx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+# Cache the 11 modulus limbs p[0..10] in low spill slots (12..44(%esp))
+# for the final conditional subtraction.
+ movl (%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+# Round 1: compute m0*p into 616(%esp)..; add to the running value and
+# shift one limb (the low limb cancels to zero by construction of m0).
+ movl %ebx, (%esp)
+ leal 616(%esp), %ecx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 616(%esp), %eax
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 620(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 632(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 636(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 640(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+# Propagate the carry through the remaining high limbs of xy.
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ sbbl %edi, %edi
+# Round 2: m = u0 * p' (68(%esp)); product buffer at 568(%esp).
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 568(%esp), %esi
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 572(%esp), %edx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 600(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+# Round 3: product buffer at 520(%esp).
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 520(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 520(%esp), %ebp
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 524(%esp), %ecx
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 548(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl 124(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 120(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+# Round 4: product buffer at 472(%esp).
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 472(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+# Round 5: product buffer at 424(%esp).
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 424(%esp), %ebp
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 428(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl 464(%esp), %ebp
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+# Round 6: product buffer at 376(%esp).
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 376(%esp), %esi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 380(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %esi # 4-byte Reload
+ adcl 404(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 412(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+# Round 7: product buffer at 328(%esp).
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 328(%esp), %edi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 332(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 344(%esp), %edi
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 352(%esp), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 356(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 92(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+# Round 8: product buffer at 280(%esp).
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 696(%esp), %eax
+ movl %eax, %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 280(%esp), %ebp
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 284(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 292(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %edi # 4-byte Reload
+ adcl 296(%esp), %edi
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl 304(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ adcl $0, 80(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+# Round 9: product buffer at 232(%esp).
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 232(%esp), %ebp
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 236(%esp), %ebp
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 244(%esp), %edi
+ movl %edi, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 276(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ adcl $0, 96(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+# Round 10: product buffer at 184(%esp).
+ movl %ebp, %eax
+ imull 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 184(%esp), %ebp
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 188(%esp), %ecx
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, %ebp
+# Round 11 (final): product buffer at 136(%esp).
+ movl 68(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 136(%esp), %ecx
+ movl 696(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ calll .LmulPv352x32
+ addl 136(%esp), %esi
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl 128(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 148(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 152(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 80(%esp), %ebx # 4-byte Reload
+ adcl 180(%esp), %ebx
+ movl %ebx, 80(%esp) # 4-byte Spill
+ adcl $0, %ebp
+# Trial subtraction of p (limbs cached at 12..44(%esp)); %ebp/%ebx ends
+# up 1 if the subtraction borrowed (result < 0), 0 otherwise.
+ subl 12(%esp), %edi # 4-byte Folded Reload
+ sbbl 4(%esp), %edx # 4-byte Folded Reload
+ sbbl 8(%esp), %ecx # 4-byte Folded Reload
+ sbbl 16(%esp), %esi # 4-byte Folded Reload
+ movl 124(%esp), %eax # 4-byte Reload
+ sbbl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ sbbl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ sbbl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ sbbl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl %ebp, %ebx
+ sbbl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ sbbl $0, %ebx
+ andl $1, %ebx
+# Select per limb: keep (sum - p) unless the trial subtraction borrowed,
+# then store the 11 result limbs to r = 688(%esp).
+ jne .LBB164_2
+# BB#1:
+ movl %esi, 112(%esp) # 4-byte Spill
+.LBB164_2:
+ testb %bl, %bl
+ movl 132(%esp), %esi # 4-byte Reload
+ jne .LBB164_4
+# BB#3:
+ movl %edi, %esi
+.LBB164_4:
+ movl 688(%esp), %edi
+ movl %esi, (%edi)
+ movl 104(%esp), %esi # 4-byte Reload
+ jne .LBB164_6
+# BB#5:
+ movl %edx, 128(%esp) # 4-byte Spill
+.LBB164_6:
+ movl 128(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%edi)
+ movl 116(%esp), %edx # 4-byte Reload
+ jne .LBB164_8
+# BB#7:
+ movl %ecx, %edx
+.LBB164_8:
+ movl %edx, 8(%edi)
+ movl 112(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%edi)
+ movl 92(%esp), %edx # 4-byte Reload
+ movl 124(%esp), %ecx # 4-byte Reload
+ jne .LBB164_10
+# BB#9:
+ movl 64(%esp), %ecx # 4-byte Reload
+.LBB164_10:
+ movl %ecx, 16(%edi)
+ movl 96(%esp), %ecx # 4-byte Reload
+ movl 120(%esp), %eax # 4-byte Reload
+ jne .LBB164_12
+# BB#11:
+ movl 68(%esp), %eax # 4-byte Reload
+.LBB164_12:
+ movl %eax, 20(%edi)
+ movl 80(%esp), %eax # 4-byte Reload
+ movl 108(%esp), %ebp # 4-byte Reload
+ jne .LBB164_14
+# BB#13:
+ movl 72(%esp), %ebp # 4-byte Reload
+.LBB164_14:
+ movl %ebp, 24(%edi)
+ jne .LBB164_16
+# BB#15:
+ movl 76(%esp), %esi # 4-byte Reload
+.LBB164_16:
+ movl %esi, 28(%edi)
+ jne .LBB164_18
+# BB#17:
+ movl 84(%esp), %edx # 4-byte Reload
+.LBB164_18:
+ movl %edx, 32(%edi)
+ jne .LBB164_20
+# BB#19:
+ movl 88(%esp), %ecx # 4-byte Reload
+.LBB164_20:
+ movl %ecx, 36(%edi)
+ jne .LBB164_22
+# BB#21:
+ movl 100(%esp), %eax # 4-byte Reload
+.LBB164_22:
+ movl %eax, 40(%edi)
+# Epilogue: release frame and restore callee-saved registers.
+ addl $668, %esp # imm = 0x29C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end164:
+ .size mcl_fp_montRed11L, .Lfunc_end164-mcl_fp_montRed11L
+
+ .globl mcl_fp_addPre11L
+ .align 16, 0x90
+ .type mcl_fp_addPre11L,@function
+#-----------------------------------------------------------------------
+# uint32_t mcl_fp_addPre11L(uint32_t *z, const uint32_t *x, const uint32_t *y)
+# 11-limb (352-bit) multi-precision add without modular reduction:
+# z = x + y, returns the final carry (0 or 1) in %eax.
+# Stack args (after 3 pushes): 16(%esp)=z, 20(%esp)=x, 24(%esp)=y.
+# The addl/adcl chain must not be interrupted by flag-clobbering
+# instructions; stores are interleaved only with mov/adc which preserve
+# or consume CF correctly.
+#-----------------------------------------------------------------------
+mcl_fp_addPre11L: # @mcl_fp_addPre11L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx # limb 0: starts the carry chain
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ebx)
+ movl 32(%ecx), %esi
+ adcl %edi, %esi
+ movl 36(%eax), %edi
+ movl %edx, 28(%ebx)
+ movl 36(%ecx), %edx
+ adcl %edi, %edx
+ movl %esi, 32(%ebx)
+ movl %edx, 36(%ebx)
+ movl 40(%eax), %eax
+ movl 40(%ecx), %ecx
+ adcl %eax, %ecx # limb 10 (last)
+ movl %ecx, 40(%ebx)
+# Materialize the final carry as 0/1 in %eax (sbb self = -CF, mask to 1).
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end165:
+ .size mcl_fp_addPre11L, .Lfunc_end165-mcl_fp_addPre11L
+
+ .globl mcl_fp_subPre11L
+ .align 16, 0x90
+ .type mcl_fp_subPre11L,@function
+#-----------------------------------------------------------------------
+# uint32_t mcl_fp_subPre11L(uint32_t *z, const uint32_t *x, const uint32_t *y)
+# 11-limb (352-bit) multi-precision subtract without modular reduction:
+# z = x - y, returns the final borrow (0 or 1) in %eax.
+# Stack args (after 4 pushes): 20(%esp)=z, 24(%esp)=x, 28(%esp)=y.
+# %eax is zeroed up front (xorl preserves no needed flags yet) and the
+# subl/sbbl chain runs uninterrupted through all 11 limbs.
+#-----------------------------------------------------------------------
+mcl_fp_subPre11L: # @mcl_fp_subPre11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax # %eax = 0; later "sbbl $0" turns borrow into 0/-1
+ movl 28(%esp), %edx
+ subl (%edx), %esi # limb 0: starts the borrow chain
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ebp)
+ movl 32(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 36(%edx), %ebx
+ movl %esi, 28(%ebp)
+ movl 36(%ecx), %esi
+ sbbl %ebx, %esi
+ movl %edi, 32(%ebp)
+ movl %esi, 36(%ebp)
+ movl 40(%edx), %edx
+ movl 40(%ecx), %ecx
+ sbbl %edx, %ecx # limb 10 (last)
+ movl %ecx, 40(%ebp)
+# Final borrow: 0 - CF masked to 0/1.
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end166:
+ .size mcl_fp_subPre11L, .Lfunc_end166-mcl_fp_subPre11L
+
+ .globl mcl_fp_shr1_11L
+ .align 16, 0x90
+ .type mcl_fp_shr1_11L,@function
+#-----------------------------------------------------------------------
+# void mcl_fp_shr1_11L(uint32_t *z, const uint32_t *x)
+# Logical right shift by 1 of an 11-limb (352-bit) value: z = x >> 1.
+# Stack args (after 1 push): 8(%esp)=z, 12(%esp)=x.
+# Each limb is produced with shrdl, which shifts in the low bit of the
+# next-higher limb; the top limb uses a plain shrl (zero fill).
+#-----------------------------------------------------------------------
+mcl_fp_shr1_11L: # @mcl_fp_shr1_11L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %ecx
+ movl 4(%eax), %edx
+ shrdl $1, %edx, %ecx # z[0] = (x[1]:x[0]) >> 1 (low half)
+ movl 8(%esp), %esi
+ movl %ecx, (%esi)
+ movl 8(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 4(%esi)
+ movl 12(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 8(%esi)
+ movl 16(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 12(%esi)
+ movl 20(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 16(%esi)
+ movl 24(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 20(%esi)
+ movl 28(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 24(%esi)
+ movl 32(%eax), %ecx
+ shrdl $1, %ecx, %edx
+ movl %edx, 28(%esi)
+ movl 36(%eax), %edx
+ shrdl $1, %edx, %ecx
+ movl %ecx, 32(%esi)
+ movl 40(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 36(%esi)
+ shrl %eax # top limb: shift in a zero bit
+ movl %eax, 40(%esi)
+ popl %esi
+ retl
+.Lfunc_end167:
+ .size mcl_fp_shr1_11L, .Lfunc_end167-mcl_fp_shr1_11L
+
+ .globl mcl_fp_add11L
+ .align 16, 0x90
+ .type mcl_fp_add11L,@function
+#-----------------------------------------------------------------------
+# void mcl_fp_add11L(uint32_t *z, const uint32_t *x, const uint32_t *y, const uint32_t *p)
+# Modular add of 11-limb values: z = (x + y) mod p, assuming x,y < p.
+# Stack args (after 4 pushes + subl $32): 52(%esp)=z, 56(%esp)=x,
+# 60(%esp)=y, 64(%esp)=p.
+# Strategy: store the raw sum to z first, then compute sum - p; if that
+# subtraction (including the carry from the add) does NOT borrow, the
+# reduced value overwrites z (the %nocarry block); otherwise the raw
+# sum already stored in z is the result.
+#-----------------------------------------------------------------------
+mcl_fp_add11L: # @mcl_fp_add11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $32, %esp
+# Phase 1: sum = x + y, limbs spilled to 0..28(%esp) / regs.
+ movl 60(%esp), %edi
+ movl (%edi), %ecx
+ movl 4(%edi), %eax
+ movl 56(%esp), %esi
+ addl (%esi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ adcl 4(%esi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 8(%edi), %eax
+ adcl 8(%esi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ movl 16(%esi), %ecx
+ adcl 12(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ adcl 16(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ adcl 20(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ adcl 24(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 28(%esi), %ebx
+ adcl 28(%edi), %ebx
+ movl %ebx, (%esp) # 4-byte Spill
+ movl 32(%esi), %ecx
+ adcl 32(%edi), %ecx
+ movl 36(%esi), %eax
+ adcl 36(%edi), %eax
+ movl 40(%esi), %edx
+ adcl 40(%edi), %edx
+# Phase 2: provisionally store the raw sum into z.
+ movl 52(%esp), %esi
+ movl %ebp, (%esi)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%esi)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%esi)
+ movl 20(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%esi)
+ movl 16(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%esi)
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%esi)
+ movl 8(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%esi)
+ movl %ebx, 28(%esi)
+ movl %ecx, 32(%esi)
+ movl %eax, 36(%esi)
+ movl %edx, 40(%esi)
+ sbbl %ebx, %ebx # %ebx = -(carry out of the add)
+ andl $1, %ebx
+# Phase 3: trial subtraction sum - p over the spill slots/regs.
+ movl 64(%esp), %ebp
+ movl 4(%esp), %edi # 4-byte Reload
+ subl (%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 28(%esp), %edi # 4-byte Reload
+ sbbl 4(%ebp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %edi # 4-byte Reload
+ sbbl 8(%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %edi # 4-byte Reload
+ sbbl 24(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl (%esp), %edi # 4-byte Reload
+ sbbl 28(%ebp), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ sbbl 32(%ebp), %ecx
+ sbbl 36(%ebp), %eax
+ sbbl 40(%ebp), %edx
+ movl %edx, %edi
+ sbbl $0, %ebx # fold in the carry from the original add
+ testb $1, %bl
+ jne .LBB168_2 # borrowed -> keep the raw sum already in z
+# BB#1: # %nocarry
+# No borrow: sum >= p, so overwrite z with the reduced value sum - p.
+ movl 4(%esp), %ebx # 4-byte Reload
+ movl %ebx, (%esi)
+ movl 28(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%esi)
+ movl 24(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%esi)
+ movl 20(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%esi)
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%esi)
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 20(%esi)
+ movl 8(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%esi)
+ movl (%esp), %edx # 4-byte Reload
+ movl %edx, 28(%esi)
+ movl %ecx, 32(%esi)
+ movl %eax, 36(%esi)
+ movl %edi, 40(%esi)
+.LBB168_2: # %carry
+ addl $32, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end168:
+ .size mcl_fp_add11L, .Lfunc_end168-mcl_fp_add11L
+
+ .globl mcl_fp_addNF11L
+ .align 16, 0x90
+ .type mcl_fp_addNF11L,@function
+#-----------------------------------------------------------------------
+# void mcl_fp_addNF11L(uint32_t *z, const uint32_t *x, const uint32_t *y, const uint32_t *p)
+# "No final carry" modular add of 11-limb values: z = (x + y) mod p,
+# relying on x + y not overflowing 352 bits (NF variant).
+# Stack args (after 4 pushes + subl $80): 100(%esp)=z, 104(%esp)=x,
+# 108(%esp)=y, 112(%esp)=p.
+# Strategy: compute s = x + y, then d = s - p; the sign of d's top limb
+# (sarl $31 of the last sbbl result) selects s (if d < 0) or d
+# (if d >= 0) limb-by-limb via js-guarded moves.
+#-----------------------------------------------------------------------
+mcl_fp_addNF11L: # @mcl_fp_addNF11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $80, %esp
+# Phase 1: s = x + y; limbs spilled to 40..76(%esp) and regs.
+ movl 108(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %ecx
+ movl 104(%esp), %esi
+ addl (%esi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 4(%esi), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%edx), %ebx
+ movl 36(%edx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 20(%edx), %ebp
+ movl 16(%edx), %edi
+ movl 12(%edx), %eax
+ movl 8(%edx), %ecx
+ adcl 8(%esi), %ecx
+ adcl 12(%esi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 16(%esi), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 20(%esi), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 24(%esi), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 28(%esi), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 32(%esi), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 36(%esi), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ adcl 40(%esi), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+# Phase 2: d = s - p; limbs spilled to 0..36(%esp).
+ movl 112(%esp), %ebx
+ movl 52(%esp), %esi # 4-byte Reload
+ subl (%ebx), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ sbbl 4(%ebx), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl %edx, %ecx
+ sbbl 8(%ebx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ sbbl 12(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ sbbl 20(%ebx), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ movl %eax, %ecx
+ sbbl 24(%ebx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ movl %edi, %ecx
+ movl %edi, %ebp
+ sbbl 36(%ebx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edi
+ sbbl 40(%ebx), %edi # final sbbl: sign of %edi = sign of d
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl %edi, %ebx
+ movl 52(%esp), %edi # 4-byte Reload
+ sarl $31, %ebx # %ebx = 0 if d >= 0, -1 if d < 0
+ testl %ebx, %ebx
+# Phase 3: per-limb select (js => d was negative => keep s) and store.
+ js .LBB169_2
+# BB#1:
+ movl %esi, %edi
+.LBB169_2:
+ movl 100(%esp), %esi
+ movl %edi, (%esi)
+ movl 60(%esp), %edi # 4-byte Reload
+ js .LBB169_4
+# BB#3:
+ movl (%esp), %edi # 4-byte Reload
+.LBB169_4:
+ movl %edi, 4(%esi)
+ movl %eax, %edi
+ js .LBB169_6
+# BB#5:
+ movl 4(%esp), %edx # 4-byte Reload
+.LBB169_6:
+ movl %edx, 8(%esi)
+ movl %ebp, %ecx
+ movl 72(%esp), %edx # 4-byte Reload
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB169_8
+# BB#7:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB169_8:
+ movl %eax, 12(%esi)
+ movl 76(%esp), %eax # 4-byte Reload
+ movl 44(%esp), %ebp # 4-byte Reload
+ js .LBB169_10
+# BB#9:
+ movl 12(%esp), %ebx # 4-byte Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+.LBB169_10:
+ movl 48(%esp), %ebx # 4-byte Reload
+ movl %ebx, 16(%esi)
+ js .LBB169_12
+# BB#11:
+ movl 16(%esp), %ebp # 4-byte Reload
+.LBB169_12:
+ movl %ebp, 20(%esi)
+ js .LBB169_14
+# BB#13:
+ movl 20(%esp), %edi # 4-byte Reload
+.LBB169_14:
+ movl %edi, 24(%esi)
+ js .LBB169_16
+# BB#15:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB169_16:
+ movl %eax, 28(%esi)
+ js .LBB169_18
+# BB#17:
+ movl 28(%esp), %edx # 4-byte Reload
+.LBB169_18:
+ movl %edx, 32(%esi)
+ js .LBB169_20
+# BB#19:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB169_20:
+ movl %ecx, 36(%esi)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB169_22
+# BB#21:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB169_22:
+ movl %eax, 40(%esi)
+ addl $80, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end169:
+ .size mcl_fp_addNF11L, .Lfunc_end169-mcl_fp_addNF11L
+
+ .globl mcl_fp_sub11L
+ .align 16, 0x90
+ .type mcl_fp_sub11L,@function
+#-----------------------------------------------------------------------
+# void mcl_fp_sub11L(uint32_t *z, const uint32_t *x, const uint32_t *y, const uint32_t *p)
+# Modular subtract of 11-limb values: z = (x - y) mod p, assuming
+# x,y < p.
+# Stack args (after 4 pushes + subl $40): 60(%esp)=z, 64(%esp)=x,
+# 68(%esp)=y, 72(%esp)=p.
+# Strategy: store the raw difference d = x - y into z; if the subtract
+# borrowed (je not taken), fall into the %carry block which adds p back
+# into z.
+#-----------------------------------------------------------------------
+mcl_fp_sub11L: # @mcl_fp_sub11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+# Phase 1: d = x - y, limbs spilled to 0..36(%esp) / regs.
+ movl 64(%esp), %ebp
+ movl (%ebp), %ecx
+ movl 4(%ebp), %eax
+ movl 68(%esp), %edi
+ subl (%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 8(%ebp), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 12(%ebp), %ebx
+ sbbl 12(%edi), %ebx
+ movl 16(%ebp), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 20(%ebp), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 24(%ebp), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 28(%ebp), %edx
+ sbbl 28(%edi), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ movl 32(%ebp), %ecx
+ sbbl 32(%edi), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 36(%ebp), %eax
+ sbbl 36(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 40(%ebp), %eax
+ sbbl 40(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebx, %ebp
+ movl 16(%esp), %esi # 4-byte Reload
+# Capture the final borrow as 0/1 before it is consumed by testb.
+ movl $0, %ebx
+ sbbl $0, %ebx
+ testb $1, %bl
+# Phase 2: provisionally store d into z.
+ movl 60(%esp), %ebx
+ movl %esi, (%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl %ebp, 12(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl %edx, 28(%ebx)
+ movl %ecx, 32(%ebx)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%ebx)
+ movl %ecx, %edi
+ movl %eax, 40(%ebx)
+ je .LBB170_2 # no borrow -> d is already the reduced result
+# BB#1: # %carry
+# Borrowed: result went negative, so add the modulus back: z = d + p.
+ movl 72(%esp), %eax
+ addl (%eax), %esi
+ movl %esi, (%ebx)
+ movl 28(%esp), %edx # 4-byte Reload
+ movl %eax, %esi
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 8(%esi), %ecx
+ movl 12(%esi), %eax
+ adcl %ebp, %eax
+ movl %ecx, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl %ecx, 32(%ebx)
+ movl 36(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 36(%ebx)
+ movl 40(%esi), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ebx)
+.LBB170_2: # %nocarry
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end170:
+ .size mcl_fp_sub11L, .Lfunc_end170-mcl_fp_sub11L
+
+# -----------------------------------------------------------------
+# mcl_fp_subNF11L(z, x, y, p) -- 11-limb subtraction, branch-free
+# ("NF" = no final branch) conditional add-back.
+# i386 cdecl; after the prologue (4 pushes + 64-byte frame = 80
+# bytes): z = 84(%esp), x = 88(%esp), y = 92(%esp), p = 96(%esp).
+# Computes z = x - y, then builds an all-ones/all-zeros mask from
+# the sign of the top result limb (sarl $31), ANDs each limb of p
+# with that mask, and adds the masked p into the difference -- so
+# p is added back exactly when the subtraction went negative,
+# without a conditional jump.
+# -----------------------------------------------------------------
+ .globl mcl_fp_subNF11L
+ .align 16, 0x90
+ .type mcl_fp_subNF11L,@function
+mcl_fp_subNF11L: # @mcl_fp_subNF11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $64, %esp
+ movl 88(%esp), %eax # eax = x
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 92(%esp), %edi # edi = y
+# sub/sbb borrow chain over all 11 limbs -- order is significant.
+ subl (%edi), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%eax), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 36(%eax), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 32(%eax), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 28(%eax), %ebx
+ movl 24(%eax), %ebp
+ movl 20(%eax), %esi
+ movl 16(%eax), %edx
+ movl 12(%eax), %ecx
+ movl 8(%eax), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ sbbl 12(%edi), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ sbbl 40(%edi), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+# esi = sign mask of top limb: 0x00000000 (non-negative) or
+# 0xFFFFFFFF (negative).  The shldl/roll below fold the top bit
+# into the limb-0/limb-1 masks (compiler-generated variant of the
+# same mask) -- do not simplify.
+ movl %edx, %esi
+ sarl $31, %esi
+ movl %esi, %eax
+ shldl $1, %edx, %eax
+ movl 96(%esp), %edx # edx = p
+ movl 4(%edx), %ecx
+ andl %eax, %ecx
+ movl %ecx, %ebx
+ andl (%edx), %eax
+# Mask every limb of p with the sign mask.
+ movl 40(%edx), %ecx
+ andl %esi, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ andl %esi, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ andl %esi, %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ andl %esi, %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ andl %esi, %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 20(%edx), %ebp
+ andl %esi, %ebp
+ roll %esi
+ movl 16(%edx), %edi
+ andl %esi, %edi
+ movl 12(%edx), %ecx
+ andl %esi, %ecx
+ andl 8(%edx), %esi
+# Add the masked p into the raw difference and store to z.
+ addl 44(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, %edx
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl 84(%esp), %ebx # ebx = z
+ movl %eax, (%ebx)
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 4(%ebx)
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %esi, 8(%ebx)
+ adcl 28(%esp), %edi # 4-byte Folded Reload
+ movl %ecx, 12(%ebx)
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, 16(%ebx)
+ movl (%esp), %ecx # 4-byte Reload
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %ebp, 20(%ebx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 8(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl %eax, 36(%ebx)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ebx)
+ addl $64, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end171:
+ .size mcl_fp_subNF11L, .Lfunc_end171-mcl_fp_subNF11L
+
+# -----------------------------------------------------------------
+# mcl_fpDbl_add11L(z, x, y, p) -- double-width (22-limb) addition
+# with conditional reduction of the high half.
+# i386 cdecl; after the prologue (4 pushes + 80-byte frame = 96
+# bytes): z = 100(%esp), x = 104(%esp), y = 108(%esp),
+# p = 112(%esp).
+# Adds x[0..21] + y[0..21] with one long add/adc chain, storing
+# the low 11 limbs straight to z.  It then speculatively subtracts
+# p from the high 11 limbs and, via the saved carry/borrow bit,
+# selects either the reduced or unreduced high half to store into
+# z[11..21].  NOTE(review): p is presumably the modulus (mcl
+# fpDbl_add convention) -- confirm against the C++ callers.
+# -----------------------------------------------------------------
+ .globl mcl_fpDbl_add11L
+ .align 16, 0x90
+ .type mcl_fpDbl_add11L,@function
+mcl_fpDbl_add11L: # @mcl_fpDbl_add11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $80, %esp
+ movl 108(%esp), %ecx # ecx = y
+ movl 104(%esp), %edi # edi = x
+ movl 12(%edi), %esi
+ movl 16(%edi), %edx
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+# Single add/adc chain over all 22 limbs; stores are interleaved
+# with the chain (movl preserves CF), so the order below matters.
+ addl (%edi), %ebp
+ movl 100(%esp), %eax # eax = z
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%edi), %ebp
+ adcl 8(%edi), %ebx
+ adcl 12(%ecx), %esi
+ adcl 16(%ecx), %edx
+ movl %ebp, 4(%eax)
+ movl 52(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %esi, 12(%eax)
+ movl 20(%edi), %esi
+ adcl %ebx, %esi
+ movl 24(%ecx), %ebx
+ movl %edx, 16(%eax)
+ movl 24(%edi), %edx
+ adcl %ebx, %edx
+ movl 28(%ecx), %ebx
+ movl %esi, 20(%eax)
+ movl 28(%edi), %esi
+ adcl %ebx, %esi
+ movl 32(%ecx), %ebx
+ movl %edx, 24(%eax)
+ movl 32(%edi), %edx
+ adcl %ebx, %edx
+ movl 36(%ecx), %ebx
+ movl %esi, 28(%eax)
+ movl 36(%edi), %esi
+ adcl %ebx, %esi
+ movl 40(%ecx), %ebx
+ movl %edx, 32(%eax)
+ movl 40(%edi), %edx
+ adcl %ebx, %edx
+ movl 44(%ecx), %ebx
+ movl %esi, 36(%eax)
+ movl 44(%edi), %esi
+ adcl %ebx, %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 48(%ecx), %esi
+ movl %edx, 40(%eax)
+ movl 48(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 52(%edi), %eax
+ adcl %ebp, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 56(%ecx), %edx
+ movl 56(%edi), %eax
+ adcl %edx, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%ecx), %edx
+ movl 60(%edi), %eax
+ adcl %edx, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%ecx), %edx
+ movl 64(%edi), %eax
+ adcl %edx, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%ecx), %eax
+ movl 68(%edi), %edx
+ adcl %eax, %edx
+ movl 72(%ecx), %esi
+ movl 72(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 76(%ecx), %ebx
+ movl 76(%edi), %esi
+ adcl %ebx, %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 80(%ecx), %ebp
+ movl 80(%edi), %ebx
+ adcl %ebp, %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 84(%ecx), %ecx
+ movl 84(%edi), %edi
+ adcl %ecx, %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+# ecx = final carry of the addition (0 or 1).
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+# Speculative high-half reduction: subtract p from limbs 11..21.
+ movl 112(%esp), %ebp # ebp = p
+ movl 68(%esp), %edi # 4-byte Reload
+ subl (%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ sbbl 4(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ sbbl 8(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %edx, %edi
+ sbbl 24(%ebp), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ sbbl 28(%ebp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl 32(%ebp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl 40(%esp), %ebx # 4-byte Reload
+ sbbl 36(%ebp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %ebx, %edi
+ sbbl 40(%ebp), %edi
+# ecx = addition-carry minus subtraction-borrow; nonzero means the
+# unreduced value was already < p, so keep it.
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB172_2
+# BB#1:
+ movl %edi, %ebx
+.LBB172_2:
+ testb %cl, %cl
+ movl 68(%esp), %ecx # 4-byte Reload
+ movl 64(%esp), %esi # 4-byte Reload
+ movl 60(%esp), %edi # 4-byte Reload
+ movl 56(%esp), %ebp # 4-byte Reload
+ jne .LBB172_4
+# BB#3: # select the reduced (x+y-p) high limbs
+ movl (%esp), %edx # 4-byte Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB172_4:
+# Store the selected high half into z[11..21].
+ movl 100(%esp), %eax
+ movl %ecx, 44(%eax)
+ movl 72(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl %ebp, 56(%eax)
+ movl %edi, 60(%eax)
+ movl %esi, 64(%eax)
+ movl %edx, 68(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl 44(%esp), %edx # 4-byte Reload
+ jne .LBB172_6
+# BB#5:
+ movl 28(%esp), %edx # 4-byte Reload
+.LBB172_6:
+ movl %edx, 72(%eax)
+ movl 48(%esp), %edx # 4-byte Reload
+ jne .LBB172_8
+# BB#7:
+ movl 32(%esp), %edx # 4-byte Reload
+.LBB172_8:
+ movl %edx, 76(%eax)
+ jne .LBB172_10
+# BB#9:
+ movl 36(%esp), %ecx # 4-byte Reload
+.LBB172_10:
+ movl %ecx, 80(%eax)
+ movl %ebx, 84(%eax)
+ addl $80, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end172:
+ .size mcl_fpDbl_add11L, .Lfunc_end172-mcl_fpDbl_add11L
+
+# -----------------------------------------------------------------
+# mcl_fpDbl_sub11L(z, x, y, p) -- double-width (22-limb)
+# subtraction with conditional add-back on the high half.
+# i386 cdecl; after the prologue (4 pushes + 72-byte frame = 88
+# bytes): z = 92(%esp), x = 96(%esp), y = 100(%esp), p = 104(%esp).
+# Computes z[0..21] = x - y with one long sub/sbb chain (low 11
+# limbs stored directly).  The final borrow selects, limb by limb,
+# either p or 0, and that selected value is added into the high 11
+# limbs -- i.e. z.hi += borrow ? p : 0.  NOTE(review): p is
+# presumably the modulus (mcl fpDbl_sub convention).
+# -----------------------------------------------------------------
+ .globl mcl_fpDbl_sub11L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub11L,@function
+mcl_fpDbl_sub11L: # @mcl_fpDbl_sub11L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 96(%esp), %edx # edx = x
+ movl (%edx), %eax
+ movl 4(%edx), %esi
+ movl 100(%esp), %ebp # ebp = y
+# sub/sbb borrow chain over all 22 limbs; stores interleave with
+# the chain (movl preserves CF) -- do not reorder.
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %esi
+ movl 8(%edx), %edi
+ sbbl 8(%ebp), %edi
+ movl 92(%esp), %ecx # ecx = z
+ movl %eax, (%ecx)
+ movl 12(%edx), %eax
+ sbbl 12(%ebp), %eax
+ movl %esi, 4(%ecx)
+ movl 16(%edx), %esi
+ sbbl 16(%ebp), %esi
+ movl %edi, 8(%ecx)
+ movl 20(%ebp), %edi
+ movl %eax, 12(%ecx)
+ movl 20(%edx), %eax
+ sbbl %edi, %eax
+ movl 24(%ebp), %edi
+ movl %esi, 16(%ecx)
+ movl 24(%edx), %esi
+ sbbl %edi, %esi
+ movl 28(%ebp), %edi
+ movl %eax, 20(%ecx)
+ movl 28(%edx), %eax
+ sbbl %edi, %eax
+ movl 32(%ebp), %edi
+ movl %esi, 24(%ecx)
+ movl 32(%edx), %esi
+ sbbl %edi, %esi
+ movl 36(%ebp), %edi
+ movl %eax, 28(%ecx)
+ movl 36(%edx), %eax
+ sbbl %edi, %eax
+ movl 40(%ebp), %edi
+ movl %esi, 32(%ecx)
+ movl 40(%edx), %esi
+ sbbl %edi, %esi
+ movl 44(%ebp), %edi
+ movl %eax, 36(%ecx)
+ movl 44(%edx), %eax
+ sbbl %edi, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 48(%ebp), %eax
+ movl %esi, 40(%ecx)
+ movl 48(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 52(%ebp), %eax
+ movl 52(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 56(%ebp), %eax
+ movl 56(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 60(%ebp), %eax
+ movl 60(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 64(%ebp), %eax
+ movl 64(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 68(%ebp), %eax
+ movl 68(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 72(%ebp), %eax
+ movl 72(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 76(%ebp), %eax
+ movl 76(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 80(%ebp), %eax
+ movl 80(%edx), %esi
+ sbbl %eax, %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 84(%ebp), %eax
+ movl 84(%edx), %edx
+ sbbl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+# eax = final borrow (0 or 1).
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+# Select p[i] (borrow) or 0 (no borrow) for each limb; the
+# selected values are the addend for the high half below.
+ movl 104(%esp), %ebp # ebp = p
+ jne .LBB173_1
+# BB#2:
+ movl $0, 28(%esp) # 4-byte Folded Spill
+ jmp .LBB173_3
+.LBB173_1:
+ movl 40(%ebp), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+.LBB173_3:
+ testb %al, %al
+ jne .LBB173_4
+# BB#5:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB173_6
+.LBB173_4:
+ movl (%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 4(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB173_6:
+ jne .LBB173_7
+# BB#8:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB173_9
+.LBB173_7:
+ movl 36(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB173_9:
+ jne .LBB173_10
+# BB#11:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB173_12
+.LBB173_10:
+ movl 32(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB173_12:
+ jne .LBB173_13
+# BB#14:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB173_15
+.LBB173_13:
+ movl 28(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB173_15:
+ jne .LBB173_16
+# BB#17:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB173_18
+.LBB173_16:
+ movl 24(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB173_18:
+ jne .LBB173_19
+# BB#20:
+ movl $0, %edx
+ jmp .LBB173_21
+.LBB173_19:
+ movl 20(%ebp), %edx
+.LBB173_21:
+ jne .LBB173_22
+# BB#23:
+ movl $0, %edi
+ jmp .LBB173_24
+.LBB173_22:
+ movl 16(%ebp), %edi
+.LBB173_24:
+ jne .LBB173_25
+# BB#26:
+ movl $0, %ebx
+ jmp .LBB173_27
+.LBB173_25:
+ movl 12(%ebp), %ebx
+.LBB173_27:
+ jne .LBB173_28
+# BB#29:
+ xorl %ebp, %ebp
+ jmp .LBB173_30
+.LBB173_28:
+ movl 8(%ebp), %ebp
+.LBB173_30:
+# High half: z[11..21] = (x - y).hi + (borrow ? p : 0), one
+# add/adc chain with interleaved stores.
+ movl 8(%esp), %esi # 4-byte Reload
+ addl 36(%esp), %esi # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 44(%ecx)
+ adcl 32(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 48(%ecx)
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 52(%ecx)
+ adcl 44(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 56(%ecx)
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edi, 60(%ecx)
+ movl (%esp), %esi # 4-byte Reload
+ adcl 52(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 64(%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 68(%ecx)
+ movl 12(%esp), %edx # 4-byte Reload
+ adcl 60(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 72(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 76(%ecx)
+ movl %eax, 80(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%ecx)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end173:
+ .size mcl_fpDbl_sub11L, .Lfunc_end173-mcl_fpDbl_sub11L
+
+# -----------------------------------------------------------------
+# .LmulPv384x32 -- multiply a 12-limb (384-bit) vector by one
+# 32-bit word.  Local helper (not exported), custom calling
+# convention used by the 12L routines below:
+#   ecx = dst (13 limbs: 12 product limbs + top carry limb)
+#   edx = src (12 limbs)
+#   first stack slot above the return address = the 32-bit
+#     multiplier (read as 100(%esp) after the 16+80 = 96-byte
+#     prologue)
+#   returns eax = dst.
+# Does 12 independent mull's (each yields edx:eax), spills all
+# partial hi/lo words, then folds them with one add/adc chain:
+# dst[i] = lo[i] + hi[i-1] + carry.
+# Clobbers: eax, ecx, edx, flags (ebx/esi/edi/ebp saved).
+# -----------------------------------------------------------------
+ .align 16, 0x90
+ .type .LmulPv384x32,@function
+.LmulPv384x32: # @mulPv384x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $80, %esp
+ movl %edx, %ebx # ebx = src
+ movl 100(%esp), %ebp # ebp = 32-bit multiplier
+# mull src[i]: edx:eax = ebp * src[i]; spill each hi/lo pair.
+ movl %ebp, %eax
+ mull 44(%ebx)
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 40(%ebx)
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 36(%ebx)
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 32(%ebx)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 28(%ebx)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 24(%ebx)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 20(%ebx)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 16(%ebx)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 12(%ebx)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 8(%ebx)
+ movl %edx, %edi
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 4(%ebx)
+ movl %edx, %esi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull (%ebx)
+# Fold: dst[i] = lo[i] + hi[i-1], carried with adc all the way up;
+# the chain order below is load-bearing.
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 8(%ecx)
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ecx)
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+# Top limb: last hi word plus the final carry.
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 48(%ecx)
+ movl %ecx, %eax # return dst pointer in eax
+ addl $80, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end174:
+ .size .LmulPv384x32, .Lfunc_end174-.LmulPv384x32
+
+# -----------------------------------------------------------------
+# mcl_fp_mulUnitPre12L(z, x, y32) -- 12-limb vector times one
+# 32-bit word; z receives 13 limbs (product + top carry).
+# i386 cdecl; after the prologue (4 pushes + 92-byte frame = 108
+# bytes): z = 112(%esp), x = 116(%esp), y32 = 120(%esp).
+# Loads ebx with the GOT base (call/pop PIC idiom) as required by
+# the local helper's environment, runs .LmulPv384x32 into a
+# 52-byte temp at 40(%esp), then copies the 13 result limbs to z.
+# -----------------------------------------------------------------
+ .globl mcl_fp_mulUnitPre12L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre12L,@function
+mcl_fp_mulUnitPre12L: # @mcl_fp_mulUnitPre12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $92, %esp
+# PIC setup: ebx = &_GLOBAL_OFFSET_TABLE_ via call/pop.
+ calll .L175$pb
+.L175$pb:
+ popl %ebx
+.Ltmp26:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp26-.L175$pb), %ebx
+# Helper convention: multiplier on stack, ecx = temp dst, edx = x.
+ movl 120(%esp), %eax
+ movl %eax, (%esp)
+ leal 40(%esp), %ecx
+ movl 116(%esp), %edx
+ calll .LmulPv384x32
+# Copy the 13 limbs from the temp buffer out to z.
+ movl 88(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp
+ movl 56(%esp), %ebx
+ movl 52(%esp), %edi
+ movl 48(%esp), %esi
+ movl 40(%esp), %edx
+ movl 44(%esp), %ecx
+ movl 112(%esp), %eax # eax = z
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ addl $92, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end175:
+ .size mcl_fp_mulUnitPre12L, .Lfunc_end175-mcl_fp_mulUnitPre12L
+
+# -----------------------------------------------------------------
+# mcl_fpDbl_mulPre12L(z, x, y) -- full 12x12-limb multiply,
+# z[0..23] = x[0..11] * y[0..11], one level of Karatsuba on top of
+# mcl_fpDbl_mulPre6L.  i386 cdecl with a frame pointer:
+# z = 8(%ebp), x = 12(%ebp), y = 16(%ebp).
+# Structure: (1) lo*lo -> z[0..11]; (2) hi*hi -> z[12..23];
+# (3) (x.lo+x.hi)*(y.lo+y.hi) into a stack temp, then subtract the
+# two partial products and fold the middle term into z[6..23].
+# The seto/lahf ... addb $127,%al/sahf pairs save and restore the
+# carry of the half-sum additions across intervening code -- they
+# are paired and must stay intact.  ebx holds the GOT base for the
+# PIC calls to mcl_fpDbl_mulPre6L@PLT.
+# -----------------------------------------------------------------
+ .globl mcl_fpDbl_mulPre12L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre12L,@function
+mcl_fpDbl_mulPre12L: # @mcl_fpDbl_mulPre12L
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $220, %esp
+# PIC setup: ebx = &_GLOBAL_OFFSET_TABLE_ (needed for @PLT calls).
+ calll .L176$pb
+.L176$pb:
+ popl %ebx
+.Ltmp27:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp27-.L176$pb), %ebx
+ movl %ebx, -164(%ebp) # 4-byte Spill
+# (1) z[0..11] = x.lo * y.lo
+ movl 16(%ebp), %esi
+ movl %esi, 8(%esp)
+ movl 12(%ebp), %edi
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre6L@PLT
+# (2) z[12..23] = x.hi * y.hi  (x+24, y+24, z+48)
+ leal 24(%esi), %eax
+ movl %eax, 8(%esp)
+ leal 24(%edi), %eax
+ movl %eax, 4(%esp)
+ movl 8(%ebp), %eax
+ leal 48(%eax), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre6L@PLT
+# Half-sum of x: (x.lo + x.hi), carry kept via seto/lahf below.
+ movl 40(%edi), %ebx
+ movl 36(%edi), %eax
+ movl 32(%edi), %edx
+ movl (%edi), %esi
+ movl 4(%edi), %ecx
+ addl 24(%edi), %esi
+ adcl 28(%edi), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ adcl 8(%edi), %edx
+ movl %edx, -188(%ebp) # 4-byte Spill
+ adcl 12(%edi), %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ adcl 16(%edi), %ebx
+ movl %ebx, -180(%ebp) # 4-byte Spill
+# Save flags (carry of the x half-sum) in ah via seto/lahf.
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+# Half-sum of y: (y.lo + y.hi).
+ movl 16(%ebp), %edi
+ movl (%edi), %eax
+ addl 24(%edi), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ movl 4(%edi), %eax
+ adcl 28(%edi), %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ movl 32(%edi), %eax
+ adcl 8(%edi), %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ movl 36(%edi), %eax
+ adcl 12(%edi), %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ movl 40(%edi), %ecx
+ adcl 16(%edi), %ecx
+ movl 44(%edi), %eax
+ adcl 20(%edi), %eax
+# Save flags of the y half-sum the same way.
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edi
+ popl %eax
+ movl %edi, -184(%ebp) # 4-byte Spill
+ movl %ebx, %edi
+ movl %edx, -156(%ebp) # 4-byte Spill
+ movl %esi, -160(%ebp) # 4-byte Spill
+ movl %esi, %edx
+ jb .LBB176_2
+# BB#1: # x half-sum did not carry: zero the carry-correction terms
+ xorl %edi, %edi
+ movl $0, -156(%ebp) # 4-byte Folded Spill
+ movl $0, -160(%ebp) # 4-byte Folded Spill
+.LBB176_2:
+ movl %edi, -176(%ebp) # 4-byte Spill
+ movl 12(%ebp), %esi
+ movl 44(%esi), %edi
+ movl -112(%ebp), %ebx # 4-byte Reload
+# Restore the x half-sum carry (addb $127,%al / sahf) and finish
+# the top limb of x.lo + x.hi.
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 20(%esi), %edi
+ movl %edi, -132(%ebp) # 4-byte Spill
+ movl %eax, -124(%ebp) # 4-byte Spill
+ movl %ecx, -112(%ebp) # 4-byte Spill
+ movl -148(%ebp), %esi # 4-byte Reload
+ movl %esi, -116(%ebp) # 4-byte Spill
+ movl -144(%ebp), %esi # 4-byte Reload
+ movl %esi, -120(%ebp) # 4-byte Spill
+ movl -140(%ebp), %esi # 4-byte Reload
+ movl %esi, -128(%ebp) # 4-byte Spill
+ movl -136(%ebp), %esi # 4-byte Reload
+ movl %esi, -152(%ebp) # 4-byte Spill
+ jb .LBB176_4
+# BB#3: # y half-sum did not carry: zero its correction terms
+ movl $0, -124(%ebp) # 4-byte Folded Spill
+ movl $0, -112(%ebp) # 4-byte Folded Spill
+ movl $0, -116(%ebp) # 4-byte Folded Spill
+ movl $0, -120(%ebp) # 4-byte Folded Spill
+ movl $0, -128(%ebp) # 4-byte Folded Spill
+ movl $0, -152(%ebp) # 4-byte Folded Spill
+.LBB176_4:
+# Lay out the two 6-limb half-sums in stack buffers at -84(%ebp)
+# (x.lo+x.hi) and -108(%ebp) (y.lo+y.hi) for the middle multiply.
+ movl %edx, -84(%ebp)
+ movl -172(%ebp), %esi # 4-byte Reload
+ movl %esi, -80(%ebp)
+ movl -188(%ebp), %edx # 4-byte Reload
+ movl %edx, -76(%ebp)
+ movl -168(%ebp), %edi # 4-byte Reload
+ movl %edi, -72(%ebp)
+ movl -180(%ebp), %edx # 4-byte Reload
+ movl %edx, -68(%ebp)
+ movl -136(%ebp), %edx # 4-byte Reload
+ movl %edx, -108(%ebp)
+ movl -140(%ebp), %edx # 4-byte Reload
+ movl %edx, -104(%ebp)
+ movl -144(%ebp), %edx # 4-byte Reload
+ movl %edx, -100(%ebp)
+ movl -148(%ebp), %edx # 4-byte Reload
+ movl %edx, -96(%ebp)
+ movl %ecx, -92(%ebp)
+ movl %eax, -88(%ebp)
+ movl %edi, %ebx
+ sbbl %edx, %edx
+ movl -132(%ebp), %eax # 4-byte Reload
+ movl %eax, -64(%ebp)
+ movl -184(%ebp), %ecx # 4-byte Reload
+ pushl %eax
+ movl %ecx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB176_6
+# BB#5:
+ movl $0, %eax
+ movl $0, %ebx
+ movl $0, %esi
+.LBB176_6:
+ movl %eax, -132(%ebp) # 4-byte Spill
+ sbbl %eax, %eax
+# (3) middle product: (x.lo+x.hi)*(y.lo+y.hi) -> -60(%ebp) temp.
+ leal -108(%ebp), %ecx
+ movl %ecx, 8(%esp)
+ leal -84(%ebp), %ecx
+ movl %ecx, 4(%esp)
+ leal -60(%ebp), %ecx
+ movl %ecx, (%esp)
+ andl %eax, %edx
+# Cross-carry correction: when one half-sum carried, add the other
+# operand's half-sum once into the high limbs of the middle term.
+ movl -152(%ebp), %edi # 4-byte Reload
+ addl -160(%ebp), %edi # 4-byte Folded Reload
+ adcl %esi, -128(%ebp) # 4-byte Folded Spill
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl %eax, -120(%ebp) # 4-byte Folded Spill
+ adcl %ebx, -116(%ebp) # 4-byte Folded Spill
+ movl -176(%ebp), %eax # 4-byte Reload
+ adcl %eax, -112(%ebp) # 4-byte Folded Spill
+ movl -132(%ebp), %eax # 4-byte Reload
+ adcl %eax, -124(%ebp) # 4-byte Folded Spill
+ sbbl %esi, %esi
+ andl $1, %esi
+ andl $1, %edx
+ movl %edx, -132(%ebp) # 4-byte Spill
+ movl -164(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre6L@PLT
+# Add the correction limbs to the upper half of the middle product.
+ addl -36(%ebp), %edi
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -32(%ebp), %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -116(%ebp) # 4-byte Spill
+ movl -112(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+ movl -124(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -124(%ebp) # 4-byte Spill
+ adcl %esi, -132(%ebp) # 4-byte Folded Spill
+# middle -= lo*lo (z[0..11]).
+ movl -60(%ebp), %ecx
+ movl 8(%ebp), %eax
+ subl (%eax), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ movl -56(%ebp), %esi
+ sbbl 4(%eax), %esi
+ movl -52(%ebp), %ecx
+ sbbl 8(%eax), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ movl -48(%ebp), %edx
+ sbbl 12(%eax), %edx
+ movl -44(%ebp), %ebx
+ sbbl 16(%eax), %ebx
+ movl -40(%ebp), %ecx
+ sbbl 20(%eax), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, -148(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl 28(%eax), %ecx
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ sbbl %ecx, -128(%ebp) # 4-byte Folded Spill
+ movl 32(%eax), %ecx
+ movl %ecx, -156(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 36(%eax), %ecx
+ movl %ecx, -160(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 40(%eax), %ecx
+ movl %ecx, -164(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 44(%eax), %ecx
+ movl %ecx, -168(%ebp) # 4-byte Spill
+ sbbl %ecx, -124(%ebp) # 4-byte Folded Spill
+ sbbl $0, -132(%ebp) # 4-byte Folded Spill
+# middle -= hi*hi (z[12..23]).
+ movl 48(%eax), %ecx
+ movl %ecx, -192(%ebp) # 4-byte Spill
+ subl %ecx, -144(%ebp) # 4-byte Folded Spill
+ movl 52(%eax), %ecx
+ movl %ecx, -196(%ebp) # 4-byte Spill
+ sbbl %ecx, %esi
+ movl 56(%eax), %ecx
+ movl %ecx, -200(%ebp) # 4-byte Spill
+ sbbl %ecx, -136(%ebp) # 4-byte Folded Spill
+ movl 60(%eax), %ecx
+ movl %ecx, -204(%ebp) # 4-byte Spill
+ sbbl %ecx, %edx
+ movl 64(%eax), %ecx
+ movl %ecx, -208(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 68(%eax), %ecx
+ movl %ecx, -212(%ebp) # 4-byte Spill
+ sbbl %ecx, -140(%ebp) # 4-byte Folded Spill
+ movl 72(%eax), %ecx
+ movl %ecx, -216(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl 76(%eax), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ sbbl %ecx, -128(%ebp) # 4-byte Folded Spill
+ movl 80(%eax), %ecx
+ movl %ecx, -176(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 84(%eax), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 88(%eax), %ecx
+ movl %ecx, -184(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 92(%eax), %ecx
+ movl %ecx, -188(%ebp) # 4-byte Spill
+ sbbl %ecx, -124(%ebp) # 4-byte Folded Spill
+ sbbl $0, -132(%ebp) # 4-byte Folded Spill
+# Fold the corrected middle term into z[6..17] and propagate the
+# carry through z[18..23].
+ movl -144(%ebp), %ecx # 4-byte Reload
+ addl -148(%ebp), %ecx # 4-byte Folded Reload
+ adcl -152(%ebp), %esi # 4-byte Folded Reload
+ movl %ecx, 24(%eax)
+ movl -136(%ebp), %ecx # 4-byte Reload
+ adcl -156(%ebp), %ecx # 4-byte Folded Reload
+ movl %esi, 28(%eax)
+ adcl -160(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 32(%eax)
+ adcl -164(%ebp), %ebx # 4-byte Folded Reload
+ movl %edx, 36(%eax)
+ movl -140(%ebp), %ecx # 4-byte Reload
+ adcl -168(%ebp), %ecx # 4-byte Folded Reload
+ movl %ebx, 40(%eax)
+ adcl -192(%ebp), %edi # 4-byte Folded Reload
+ movl %ecx, 44(%eax)
+ movl -128(%ebp), %ecx # 4-byte Reload
+ adcl -196(%ebp), %ecx # 4-byte Folded Reload
+ movl %edi, 48(%eax)
+ movl -120(%ebp), %edx # 4-byte Reload
+ adcl -200(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 52(%eax)
+ movl -116(%ebp), %ecx # 4-byte Reload
+ adcl -204(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 56(%eax)
+ movl -112(%ebp), %edx # 4-byte Reload
+ adcl -208(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 60(%eax)
+ movl -124(%ebp), %ecx # 4-byte Reload
+ adcl -212(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 64(%eax)
+ movl -132(%ebp), %edx # 4-byte Reload
+ adcl -216(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ movl %edx, 72(%eax)
+ movl -172(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 76(%eax)
+ movl -176(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 80(%eax)
+ movl -180(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 84(%eax)
+ movl -184(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 88(%eax)
+ movl -188(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 92(%eax)
+ addl $220, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end176:
+ .size mcl_fpDbl_mulPre12L, .Lfunc_end176-mcl_fpDbl_mulPre12L
+
+ .globl mcl_fpDbl_sqrPre12L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre12L,@function
+mcl_fpDbl_sqrPre12L: # @mcl_fpDbl_sqrPre12L
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $220, %esp
+ calll .L177$pb
+.L177$pb:
+ popl %ebx
+.Ltmp28:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp28-.L177$pb), %ebx
+ movl %ebx, -152(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %esi
+ movl %esi, (%esp)
+ calll mcl_fpDbl_mulPre6L@PLT
+ leal 24(%edi), %eax
+ movl %eax, 8(%esp)
+ movl %eax, 4(%esp)
+ leal 48(%esi), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre6L@PLT
+ movl 44(%edi), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ movl 40(%edi), %edx
+ movl 36(%edi), %eax
+ movl (%edi), %ebx
+ movl 4(%edi), %esi
+ addl 24(%edi), %ebx
+ adcl 28(%edi), %esi
+ movl 32(%edi), %ecx
+ adcl 8(%edi), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ adcl 12(%edi), %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ adcl 16(%edi), %edx
+ movl %edx, %ecx
+ movl -136(%ebp), %eax # 4-byte Reload
+ adcl 20(%edi), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %edx
+ movl %edx, -156(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %edx
+ popl %eax
+ movl %edx, -124(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %edx
+ sbbl %edi, %edi
+ movl %edi, -148(%ebp) # 4-byte Spill
+ movl %ebx, %edi
+ addl %edi, %edi
+ movl %edi, -112(%ebp) # 4-byte Spill
+ movl %esi, %edi
+ movl %esi, %eax
+ adcl %edi, %edi
+ movl %edi, -132(%ebp) # 4-byte Spill
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB177_2
+# BB#1:
+ movl $0, -132(%ebp) # 4-byte Folded Spill
+ movl $0, -112(%ebp) # 4-byte Folded Spill
+.LBB177_2:
+ movl -144(%ebp), %esi # 4-byte Reload
+ addl %esi, %esi
+ movl -140(%ebp), %edx # 4-byte Reload
+ adcl %edx, %edx
+ movl %edx, -116(%ebp) # 4-byte Spill
+ movl -120(%ebp), %edx # 4-byte Reload
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB177_3
+# BB#4:
+ movl $0, -116(%ebp) # 4-byte Folded Spill
+ movl $0, -120(%ebp) # 4-byte Folded Spill
+ jmp .LBB177_5
+.LBB177_3:
+ movl %eax, %edx
+ shrl $31, %edx
+ orl %esi, %edx
+ movl %edx, -120(%ebp) # 4-byte Spill
+.LBB177_5:
+ movl -136(%ebp), %edx # 4-byte Reload
+ movl %ecx, %esi
+ addl %esi, %esi
+ adcl %edx, %edx
+ movl -124(%ebp), %edi # 4-byte Reload
+ pushl %eax
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB177_6
+# BB#7:
+ xorl %edx, %edx
+ movl $0, -128(%ebp) # 4-byte Folded Spill
+ movl -140(%ebp), %edi # 4-byte Reload
+ jmp .LBB177_8
+.LBB177_6:
+ movl %ecx, -124(%ebp) # 4-byte Spill
+ movl -140(%ebp), %edi # 4-byte Reload
+ movl %edi, %ecx
+ shrl $31, %ecx
+ orl %esi, %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+ movl -124(%ebp), %ecx # 4-byte Reload
+.LBB177_8:
+ movl %edx, -124(%ebp) # 4-byte Spill
+ movl %ebx, -84(%ebp)
+ movl %eax, -80(%ebp)
+ movl -144(%ebp), %esi # 4-byte Reload
+ movl %esi, -76(%ebp)
+ movl %edi, -72(%ebp)
+ movl %ecx, -68(%ebp)
+ movl -136(%ebp), %edx # 4-byte Reload
+ movl %edx, -64(%ebp)
+ movl %ebx, -108(%ebp)
+ movl %eax, -104(%ebp)
+ movl %esi, -100(%ebp)
+ movl %edi, -96(%ebp)
+ movl %ecx, -92(%ebp)
+ movl %edx, -88(%ebp)
+ movl -156(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB177_9
+# BB#10:
+ movl $0, -136(%ebp) # 4-byte Folded Spill
+ jmp .LBB177_11
+.LBB177_9:
+ shrl $31, %edx
+ movl %edx, -136(%ebp) # 4-byte Spill
+.LBB177_11:
+ leal -108(%ebp), %eax
+ movl %eax, 8(%esp)
+ leal -84(%ebp), %eax
+ movl %eax, 4(%esp)
+ leal -60(%ebp), %eax
+ movl %eax, (%esp)
+ movl -148(%ebp), %esi # 4-byte Reload
+ andl $1, %esi
+ movl -152(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre6L@PLT
+ movl -112(%ebp), %eax # 4-byte Reload
+ addl -36(%ebp), %eax
+ movl %eax, -112(%ebp) # 4-byte Spill
+ movl -132(%ebp), %edi # 4-byte Reload
+ adcl -32(%ebp), %edi
+ movl -120(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -120(%ebp) # 4-byte Spill
+ movl -116(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -116(%ebp) # 4-byte Spill
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+ movl -124(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -124(%ebp) # 4-byte Spill
+ adcl -136(%ebp), %esi # 4-byte Folded Reload
+ movl -60(%ebp), %edx
+ movl 8(%ebp), %eax
+ subl (%eax), %edx
+ movl -56(%ebp), %ebx
+ sbbl 4(%eax), %ebx
+ movl -52(%ebp), %ecx
+ sbbl 8(%eax), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ movl -48(%ebp), %ecx
+ sbbl 12(%eax), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ movl -44(%ebp), %ecx
+ sbbl 16(%eax), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ movl -40(%ebp), %ecx
+ sbbl 20(%eax), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ movl %ecx, -148(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 28(%eax), %ecx
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl %edi, -132(%ebp) # 4-byte Spill
+ movl 32(%eax), %ecx
+ movl %ecx, -156(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 36(%eax), %ecx
+ movl %ecx, -160(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 40(%eax), %ecx
+ movl %ecx, -164(%ebp) # 4-byte Spill
+ sbbl %ecx, -128(%ebp) # 4-byte Folded Spill
+ movl 44(%eax), %ecx
+ movl %ecx, -168(%ebp) # 4-byte Spill
+ sbbl %ecx, -124(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ movl 48(%eax), %ecx
+ movl %ecx, -192(%ebp) # 4-byte Spill
+ subl %ecx, %edx
+ movl 52(%eax), %ecx
+ movl %ecx, -196(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 56(%eax), %ecx
+ movl %ecx, -200(%ebp) # 4-byte Spill
+ sbbl %ecx, -136(%ebp) # 4-byte Folded Spill
+ movl 60(%eax), %ecx
+ movl %ecx, -204(%ebp) # 4-byte Spill
+ sbbl %ecx, -144(%ebp) # 4-byte Folded Spill
+ movl 64(%eax), %ecx
+ movl %ecx, -208(%ebp) # 4-byte Spill
+ movl -172(%ebp), %edi # 4-byte Reload
+ sbbl %ecx, %edi
+ movl 68(%eax), %ecx
+ movl %ecx, -212(%ebp) # 4-byte Spill
+ sbbl %ecx, -140(%ebp) # 4-byte Folded Spill
+ movl 72(%eax), %ecx
+ movl %ecx, -216(%ebp) # 4-byte Spill
+ sbbl %ecx, -112(%ebp) # 4-byte Folded Spill
+ movl 76(%eax), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ sbbl %ecx, -132(%ebp) # 4-byte Folded Spill
+ movl 80(%eax), %ecx
+ movl %ecx, -176(%ebp) # 4-byte Spill
+ sbbl %ecx, -120(%ebp) # 4-byte Folded Spill
+ movl 84(%eax), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ sbbl %ecx, -116(%ebp) # 4-byte Folded Spill
+ movl 88(%eax), %ecx
+ movl %ecx, -184(%ebp) # 4-byte Spill
+ sbbl %ecx, -128(%ebp) # 4-byte Folded Spill
+ movl 92(%eax), %ecx
+ movl %ecx, -188(%ebp) # 4-byte Spill
+ sbbl %ecx, -124(%ebp) # 4-byte Folded Spill
+ sbbl $0, %esi
+ addl -148(%ebp), %edx # 4-byte Folded Reload
+ adcl -152(%ebp), %ebx # 4-byte Folded Reload
+ movl %edx, 24(%eax)
+ movl -136(%ebp), %ecx # 4-byte Reload
+ adcl -156(%ebp), %ecx # 4-byte Folded Reload
+ movl %ebx, 28(%eax)
+ movl -144(%ebp), %edx # 4-byte Reload
+ adcl -160(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 32(%eax)
+ adcl -164(%ebp), %edi # 4-byte Folded Reload
+ movl %edx, 36(%eax)
+ movl -140(%ebp), %edx # 4-byte Reload
+ adcl -168(%ebp), %edx # 4-byte Folded Reload
+ movl %edi, 40(%eax)
+ movl -112(%ebp), %ecx # 4-byte Reload
+ adcl -192(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 44(%eax)
+ movl -132(%ebp), %edi # 4-byte Reload
+ adcl -196(%ebp), %edi # 4-byte Folded Reload
+ movl %ecx, 48(%eax)
+ movl -120(%ebp), %edx # 4-byte Reload
+ adcl -200(%ebp), %edx # 4-byte Folded Reload
+ movl %edi, 52(%eax)
+ movl -116(%ebp), %ecx # 4-byte Reload
+ adcl -204(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 56(%eax)
+ movl -128(%ebp), %edx # 4-byte Reload
+ adcl -208(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 60(%eax)
+ movl -124(%ebp), %ecx # 4-byte Reload
+ adcl -212(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 64(%eax)
+ adcl -216(%ebp), %esi # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ movl %esi, 72(%eax)
+ movl -172(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 76(%eax)
+ movl -176(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 80(%eax)
+ movl -180(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 84(%eax)
+ movl -184(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 88(%eax)
+ movl -188(%ebp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 92(%eax)
+ addl $220, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end177:
+ .size mcl_fpDbl_sqrPre12L, .Lfunc_end177-mcl_fpDbl_sqrPre12L
+
+ .globl mcl_fp_mont12L
+ .align 16, 0x90
+ .type mcl_fp_mont12L,@function
+mcl_fp_mont12L: # @mcl_fp_mont12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1436, %esp # imm = 0x59C
+ calll .L178$pb
+.L178$pb:
+ popl %ebx
+.Ltmp29:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp29-.L178$pb), %ebx
+ movl 1468(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1384(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 1384(%esp), %ebp
+ movl 1388(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 1432(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 1428(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1424(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1420(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1416(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1412(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1408(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1404(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1400(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1396(%esp), %edi
+ movl 1392(%esp), %esi
+ movl %eax, (%esp)
+ leal 1328(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ addl 1328(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1336(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ adcl 1340(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1360(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1372(%esp), %esi
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1376(%esp), %ebp
+ sbbl %edi, %edi
+ movl 1464(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1272(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %edi
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 1272(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1312(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 1316(%esp), %ebp
+ adcl 1320(%esp), %edi
+ sbbl %eax, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1216(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ movl 84(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1216(%esp), %esi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1220(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 1224(%esp), %esi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1228(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1232(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1236(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1240(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1244(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1248(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1252(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1256(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 1260(%esp), %ebp
+ adcl 1264(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1160(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 1160(%esp), %ecx
+ adcl 1164(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1188(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1200(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ adcl 1204(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1208(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1104(%esp), %ecx
+ movl 1468(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 1104(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1140(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1148(%esp), %edi
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 1152(%esp), %ebp
+ adcl $0, %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1048(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 1048(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1080(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1088(%esp), %edi
+ adcl 1092(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %esi
+ movl %esi, %eax
+ addl 992(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 996(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1000(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 1004(%esp), %ebp
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1008(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1012(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1016(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1020(%esp), %esi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1024(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1028(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1032(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1036(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1040(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl %eax, %edi
+ adcl $0, %edi
+ movl 1464(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 936(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 936(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 944(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 948(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 960(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 980(%esp), %esi
+ adcl 984(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 880(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %edi
+ movl %edi, %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 880(%esp), %eax
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 892(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 912(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 924(%esp), %esi
+ movl %esi, %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 824(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 824(%esp), %ecx
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 840(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 852(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 864(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 768(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 768(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 780(%esp), %ebp
+ adcl 784(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 800(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 808(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 1460(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ movl 44(%esp), %eax # 4-byte Reload
+ addl 712(%esp), %eax
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 716(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 720(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 724(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 728(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 732(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 736(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 740(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 744(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 748(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 752(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 756(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 760(%esp), %edi
+ sbbl %ebp, %ebp
+ movl %eax, %esi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 656(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 656(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 660(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 664(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 668(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 672(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 676(%esp), %ebp
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 680(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 684(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 688(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 696(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 704(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl %eax, %edi
+ adcl $0, %edi
+ movl 1464(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 600(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 600(%esp), %ecx
+ adcl 604(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 616(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 620(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 636(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 648(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ movl 44(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 544(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 548(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 552(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 556(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 560(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 564(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 568(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 572(%esp), %edi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 576(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 580(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 584(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 588(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 592(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 488(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 488(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 512(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 524(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 532(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 536(%esp), %ebp
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 432(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 440(%esp), %edi
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 444(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 480(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 376(%esp), %ecx
+ adcl 380(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ adcl 384(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 392(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 416(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 320(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 320(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ adcl 336(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 344(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 360(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 264(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 272(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 284(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 288(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 296(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ movl 88(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 208(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 224(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 232(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 236(%esp), %edi
+ adcl 240(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 248(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1464(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 152(%esp), %ecx
+ movl 1460(%esp), %edx
+ calll .LmulPv384x32
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 152(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 164(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 176(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 188(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 196(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 96(%esp), %ecx
+ movl 1468(%esp), %edx
+ calll .LmulPv384x32
+ andl $1, %esi
+ addl 96(%esp), %edi
+ movl 84(%esp), %ebx # 4-byte Reload
+ movl 92(%esp), %eax # 4-byte Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 100(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %edx, %edi
+ adcl 108(%esp), %ebx
+ adcl 112(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 124(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 128(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 132(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 136(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 140(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ adcl $0, %esi
+ movl 1468(%esp), %edx
+ subl (%edx), %eax
+ sbbl 4(%edx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl %ebx, %edi
+ sbbl 8(%edx), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ sbbl 12(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ sbbl 16(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ sbbl 20(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ sbbl 24(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 28(%edx), %ecx
+ movl 44(%esp), %edi # 4-byte Reload
+ sbbl 32(%edx), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ sbbl 36(%edx), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ sbbl 40(%edx), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ sbbl 44(%edx), %ebp
+ movl %ebp, %edx
+ sbbl $0, %esi
+ andl $1, %esi
+ jne .LBB178_2
+# BB#1:
+ movl %ecx, 52(%esp) # 4-byte Spill
+.LBB178_2:
+ movl %esi, %ecx
+ testb %cl, %cl
+ movl 92(%esp), %ecx # 4-byte Reload
+ jne .LBB178_4
+# BB#3:
+ movl %eax, %ecx
+.LBB178_4:
+ movl 1456(%esp), %eax
+ movl %ecx, (%eax)
+ movl 68(%esp), %edi # 4-byte Reload
+ jne .LBB178_6
+# BB#5:
+ movl 16(%esp), %edi # 4-byte Reload
+.LBB178_6:
+ movl %edi, 4(%eax)
+ movl 64(%esp), %ebp # 4-byte Reload
+ jne .LBB178_8
+# BB#7:
+ movl 20(%esp), %ebx # 4-byte Reload
+.LBB178_8:
+ movl %ebx, 8(%eax)
+ jne .LBB178_10
+# BB#9:
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 72(%esp) # 4-byte Spill
+.LBB178_10:
+ movl 72(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ jne .LBB178_12
+# BB#11:
+ movl 28(%esp), %ebp # 4-byte Reload
+.LBB178_12:
+ movl %ebp, 16(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB178_14
+# BB#13:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB178_14:
+ movl %ecx, 20(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ jne .LBB178_16
+# BB#15:
+ movl 36(%esp), %ecx # 4-byte Reload
+.LBB178_16:
+ movl %ecx, 24(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB178_18
+# BB#17:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB178_18:
+ movl %ecx, 32(%eax)
+ movl 60(%esp), %ecx # 4-byte Reload
+ jne .LBB178_20
+# BB#19:
+ movl 80(%esp), %ecx # 4-byte Reload
+.LBB178_20:
+ movl %ecx, 36(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ jne .LBB178_22
+# BB#21:
+ movl 84(%esp), %ecx # 4-byte Reload
+.LBB178_22:
+ movl %ecx, 40(%eax)
+ movl 88(%esp), %ecx # 4-byte Reload
+ jne .LBB178_24
+# BB#23:
+ movl %edx, %ecx
+.LBB178_24:
+ movl %ecx, 44(%eax)
+ addl $1436, %esp # imm = 0x59C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end178:
+ .size mcl_fp_mont12L, .Lfunc_end178-mcl_fp_mont12L
+
+ .globl mcl_fp_montNF12L
+ .align 16, 0x90
+ .type mcl_fp_montNF12L,@function
+mcl_fp_montNF12L: # @mcl_fp_montNF12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1420, %esp # imm = 0x58C
+ calll .L179$pb
+.L179$pb:
+ popl %ebx
+.Ltmp30:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp30-.L179$pb), %ebx
+ movl 1452(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1368(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 1368(%esp), %ebp
+ movl 1372(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 1416(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1412(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1408(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1404(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1400(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1396(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1392(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1388(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1384(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1380(%esp), %edi
+ movl 1376(%esp), %esi
+ movl %eax, (%esp)
+ leal 1312(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 1312(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1320(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ adcl 1324(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1328(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 1344(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 1356(%esp), %esi
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 1360(%esp), %ebp
+ movl 1448(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1256(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 1304(%esp), %eax
+ movl 56(%esp), %edx # 4-byte Reload
+ addl 1256(%esp), %edx
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1260(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1264(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1268(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1272(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 1276(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1280(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 1284(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1288(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 1296(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 1300(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, %edi
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1200(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 1200(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1204(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 1208(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1212(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1216(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1244(%esp), %ebp
+ adcl 1248(%esp), %edi
+ movl 1448(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1144(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 1192(%esp), %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ addl 1144(%esp), %edx
+ adcl 1148(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1152(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1156(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 1160(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1164(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1168(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1172(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1176(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1180(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 1184(%esp), %ebp
+ adcl 1188(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1088(%esp), %ecx
+ movl 1452(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ addl 1088(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl %esi, %edi
+ adcl 1104(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 1124(%esp), %esi
+ adcl 1128(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 1136(%esp), %ebp
+ movl 1448(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1032(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 1080(%esp), %eax
+ movl 52(%esp), %edx # 4-byte Reload
+ addl 1032(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1036(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1040(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 1044(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1048(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1052(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1056(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1060(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 1064(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1068(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1072(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 1076(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %edi
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 976(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 976(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 1004(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 1012(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1024(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 920(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 968(%esp), %eax
+ movl 40(%esp), %edx # 4-byte Reload
+ addl 920(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 924(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 928(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 932(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 936(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 940(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 944(%esp), %esi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 948(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 952(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 956(%esp), %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 960(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 964(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 864(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 864(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 884(%esp), %ebp
+ adcl 888(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 900(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 912(%esp), %edi
+ movl 1448(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 808(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 856(%esp), %edx
+ movl 36(%esp), %ecx # 4-byte Reload
+ addl 808(%esp), %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 824(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ adcl 828(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 832(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 852(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 1452(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ addl 752(%esp), %esi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 760(%esp), %edi
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 764(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 776(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 792(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1448(%esp), %ecx
+ movl %ecx, %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 1444(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ movl 744(%esp), %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ addl 696(%esp), %eax
+ adcl 700(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 704(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 708(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 712(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 716(%esp), %esi
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 720(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 724(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 728(%esp), %edi
+ adcl 732(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 736(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 740(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 640(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 640(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 648(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 660(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 668(%esp), %esi
+ adcl 672(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 676(%esp), %edi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 584(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 632(%esp), %edx
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 584(%esp), %ecx
+ adcl 588(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 596(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 608(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 616(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 620(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 528(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 528(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 540(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 564(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 568(%esp), %edi
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 572(%esp), %esi
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 576(%esp), %ebp
+ movl 1448(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 520(%esp), %edx
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 472(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 508(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 512(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 516(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 416(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 432(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 440(%esp), %ebp
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 444(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 360(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 408(%esp), %edx
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 360(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 372(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 380(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ adcl 384(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 304(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 312(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 320(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 328(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 296(%esp), %edx
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 248(%esp), %ecx
+ adcl 252(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 260(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ adcl 272(%esp), %ebp
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 1452(%esp), %edx
+ calll .LmulPv384x32
+ addl 192(%esp), %esi
+ adcl 196(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 200(%esp), %edi
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 204(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 216(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ adcl 224(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1448(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 136(%esp), %ecx
+ movl 1444(%esp), %edx
+ calll .LmulPv384x32
+ movl 184(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 136(%esp), %ecx
+ adcl 140(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ adcl 144(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 152(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 160(%esp), %edi
+ adcl 164(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 168(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 80(%esp), %ecx
+ movl 1452(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv384x32
+ addl 80(%esp), %esi
+ movl 56(%esp), %esi # 4-byte Reload
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 92(%esp), %esi
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 100(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 104(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 112(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 124(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 128(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 1452(%esp), %ebp
+ subl (%ebp), %edx
+ movl %ecx, %eax
+ sbbl 4(%ebp), %eax
+ movl %esi, %ebx
+ sbbl 8(%ebp), %ebx
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 12(%ebp), %ecx
+ movl 40(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ sbbl 24(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ sbbl 28(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ sbbl 32(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ sbbl 36(%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 40(%ebp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ sbbl 44(%ebp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ movl 76(%esp), %ebp # 4-byte Reload
+ js .LBB179_2
+# BB#1:
+ movl %edx, %ebp
+.LBB179_2:
+ movl 1440(%esp), %edx
+ movl %ebp, (%edx)
+ movl 68(%esp), %edi # 4-byte Reload
+ js .LBB179_4
+# BB#3:
+ movl %eax, %edi
+.LBB179_4:
+ movl %edi, 4(%edx)
+ js .LBB179_6
+# BB#5:
+ movl %ebx, %esi
+.LBB179_6:
+ movl %esi, 8(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB179_8
+# BB#7:
+ movl %ecx, %eax
+.LBB179_8:
+ movl %eax, 12(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB179_10
+# BB#9:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB179_10:
+ movl %eax, 16(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB179_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB179_12:
+ movl %eax, 20(%edx)
+ movl 32(%esp), %eax # 4-byte Reload
+ js .LBB179_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB179_14:
+ movl %eax, 24(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB179_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB179_16:
+ movl %eax, 28(%edx)
+ movl 44(%esp), %eax # 4-byte Reload
+ js .LBB179_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB179_18:
+ movl %eax, 32(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB179_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB179_20:
+ movl %eax, 36(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB179_22
+# BB#21:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB179_22:
+ movl %eax, 40(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB179_24
+# BB#23:
+ movl 56(%esp), %eax # 4-byte Reload
+.LBB179_24:
+ movl %eax, 44(%edx)
+ addl $1420, %esp # imm = 0x58C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end179:
+ .size mcl_fp_montNF12L, .Lfunc_end179-mcl_fp_montNF12L
+
+# -----------------------------------------------------------------------------
+# mcl_fp_montRed12L -- Montgomery reduction, 12 x 32-bit limbs (compiler-
+# generated LLVM output; 32-bit x86, GAS/AT&T syntax, PIC).
+# NOTE(review): from the stack offsets below (args at 848/852/856(%esp) after
+# four pushes + the 828-byte frame) this looks like
+#     mcl_fp_montRed12L(uint32_t z[12], uint32_t xy[24], const uint32_t *p)
+# with the per-limb constant rp loaded from p[-1] (presumably -p^{-1} mod 2^32)
+# -- confirm against mcl's C prototype.
+# Structure: 12 rounds of { m = limb * rp; accumulate m * p via the helper
+# .LmulPv384x32; drop the zeroed low limb }, then one conditional subtraction
+# of p.  The adcl/sbbl chains mean statement order IS the algorithm here.
+ .globl mcl_fp_montRed12L
+ .align 16, 0x90
+ .type mcl_fp_montRed12L,@function
+mcl_fp_montRed12L: # @mcl_fp_montRed12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $828, %esp # imm = 0x33C
+# PIC prologue: call/pop pair materializes EIP, then the GOT base is formed
+# and spilled at 92(%esp); it is reloaded into %ebx before every helper call.
+ calll .L180$pb
+.L180$pb:
+ popl %eax
+.Ltmp31:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp31-.L180$pb), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+# %edx = p; %esi = rp = p[-1] (Montgomery constant, presumably -p^{-1} mod 2^32).
+ movl 856(%esp), %edx
+ movl -4(%edx), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+# Load all 24 input limbs xy[0..23] into spill slots / registers.
+# %ebx = m0 = xy[0] * rp, the first Montgomery factor.
+ movl 852(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 88(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ imull %esi, %ebx
+ movl 92(%ecx), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 88(%ecx), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 84(%ecx), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 80(%ecx), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 76(%ecx), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 72(%ecx), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 68(%ecx), %edi
+ movl %edi, 140(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 60(%ecx), %esi
+ movl %esi, 148(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ movl 48(%ecx), %edi
+ movl %edi, 152(%esp) # 4-byte Spill
+ movl 44(%ecx), %edi
+ movl %edi, 132(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 32(%ecx), %edi
+ movl 28(%ecx), %esi
+ movl 24(%ecx), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 20(%ecx), %ebp
+ movl 16(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 12(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 8(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+# Spill the 12 modulus limbs p[0..11] (used again by the final subtraction).
+ movl (%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+# Round 1: helper computes m0 * p into 776(%esp)..; adding it zeroes limb 0.
+ movl %ebx, (%esp)
+ leal 776(%esp), %ecx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ movl 88(%esp), %eax # 4-byte Reload
+ addl 776(%esp), %eax
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 780(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 796(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 804(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl 808(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+# Propagate the carry through the untouched upper limbs.
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ sbbl %ebp, %ebp
+# Round 2: m = limb * rp, accumulate m * p at 720(%esp).
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 720(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 720(%esp), %esi
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 724(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 752(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl $0, %edi
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+# Round 3: product buffer at 664(%esp).
+ movl %ebp, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 664(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 664(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 668(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 692(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl 696(%esp), %ebp
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl 712(%esp), %edi
+ movl %edi, 136(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ movl 144(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+# Round 4: product buffer at 608(%esp).
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 608(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 608(%esp), %esi
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 612(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 636(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 144(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+# Round 5: product buffer at 552(%esp).
+ movl %esi, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 552(%esp), %esi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 556(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, %esi
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+# Round 6: product buffer at 496(%esp).
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 496(%esp), %edi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %ebp # 4-byte Reload
+ adcl 528(%esp), %ebp
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl 532(%esp), %edi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+# Round 7: product buffer at 440(%esp).
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 440(%esp), %esi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl 468(%esp), %ebp
+ movl %ebp, 156(%esp) # 4-byte Spill
+ adcl 472(%esp), %edi
+ movl %edi, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %esi # 4-byte Reload
+ adcl 476(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+# Round 8: product buffer at 384(%esp).
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 384(%esp), %ecx
+ movl 856(%esp), %eax
+ movl %eax, %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 384(%esp), %edi
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 388(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %ebp # 4-byte Reload
+ adcl 400(%esp), %ebp
+ movl 152(%esp), %edi # 4-byte Reload
+ adcl 404(%esp), %edi
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ adcl 416(%esp), %esi
+ movl %esi, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %esi # 4-byte Reload
+ adcl 424(%esp), %esi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 76(%esp) # 4-byte Folded Spill
+# Round 9: product buffer at 328(%esp).
+ movl %ecx, %eax
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ movl 100(%esp), %eax # 4-byte Reload
+ addl 328(%esp), %eax
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 336(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ adcl 340(%esp), %ebp
+ movl %ebp, 132(%esp) # 4-byte Spill
+ adcl 344(%esp), %edi
+ movl %edi, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 348(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 352(%esp), %ecx
+ movl %ecx, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 356(%esp), %ecx
+ movl %ecx, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 360(%esp), %ebp
+ adcl 364(%esp), %esi
+ movl %esi, 140(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 368(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 372(%esp), %ecx
+ movl %ecx, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 376(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+# Round 10: product buffer at 272(%esp).
+ movl %eax, %esi
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 272(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 280(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 284(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 288(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 292(%esp), %ecx
+ movl %ecx, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 296(%esp), %ecx
+ movl %ecx, 148(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ adcl 300(%esp), %esi
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 304(%esp), %ecx
+ movl %ecx, 140(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 308(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 312(%esp), %ecx
+ movl %ecx, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 316(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 320(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, %ebp
+# Round 11: product buffer at 216(%esp).
+ movl %eax, %edi
+ imull 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 216(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 216(%esp), %edi
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 220(%esp), %ecx
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl 240(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl 248(%esp), %esi
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+# Round 12 (last): product buffer at 160(%esp).
+ movl 84(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 160(%esp), %ecx
+ movl 856(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv384x32
+ addl 160(%esp), %edi
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl %eax, %edi
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 136(%esp), %edx # 4-byte Reload
+ adcl 172(%esp), %edx
+ movl %edx, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ebx # 4-byte Reload
+ adcl 176(%esp), %ebx
+ movl %ebx, 148(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 180(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl 188(%esp), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, %ebp
+# Final conditional subtraction: t = result - p (12-limb sbbl chain).
+# %ebp carries the running overflow bit into the final borrow decision.
+ subl 24(%esp), %edi # 4-byte Folded Reload
+ movl 156(%esp), %esi # 4-byte Reload
+ sbbl 16(%esp), %esi # 4-byte Folded Reload
+ sbbl 20(%esp), %edx # 4-byte Folded Reload
+ sbbl 28(%esp), %ebx # 4-byte Folded Reload
+ sbbl 32(%esp), %ecx # 4-byte Folded Reload
+ movl 140(%esp), %eax # 4-byte Reload
+ sbbl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ sbbl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ sbbl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ sbbl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ sbbl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 132(%esp) # 4-byte Spill
+ sbbl $0, %ebp
+ andl $1, %ebp
+# Per-limb select: %ebp != 0 keeps the unsubtracted sum, else take sum - p,
+# then store the 12 chosen limbs into z (first argument, 848(%esp)).
+ jne .LBB180_2
+# BB#1:
+ movl %ebx, 148(%esp) # 4-byte Spill
+.LBB180_2:
+ movl %ebp, %ebx
+ testb %bl, %bl
+ movl 152(%esp), %ebx # 4-byte Reload
+ jne .LBB180_4
+# BB#3:
+ movl %edi, %ebx
+.LBB180_4:
+ movl 848(%esp), %edi
+ movl %ebx, (%edi)
+ movl 144(%esp), %ebx # 4-byte Reload
+ jne .LBB180_6
+# BB#5:
+ movl %esi, 156(%esp) # 4-byte Spill
+.LBB180_6:
+ movl 156(%esp), %esi # 4-byte Reload
+ movl %esi, 4(%edi)
+ movl 136(%esp), %esi # 4-byte Reload
+ jne .LBB180_8
+# BB#7:
+ movl %edx, %esi
+.LBB180_8:
+ movl %esi, 8(%edi)
+ movl 148(%esp), %edx # 4-byte Reload
+ movl %edx, 12(%edi)
+ movl 128(%esp), %esi # 4-byte Reload
+ movl 116(%esp), %edx # 4-byte Reload
+ jne .LBB180_10
+# BB#9:
+ movl %ecx, %edx
+.LBB180_10:
+ movl %edx, 16(%edi)
+ movl 120(%esp), %edx # 4-byte Reload
+ movl 140(%esp), %ecx # 4-byte Reload
+ jne .LBB180_12
+# BB#11:
+ movl 84(%esp), %ecx # 4-byte Reload
+.LBB180_12:
+ movl %ecx, 20(%edi)
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl 124(%esp), %eax # 4-byte Reload
+ jne .LBB180_14
+# BB#13:
+ movl 88(%esp), %eax # 4-byte Reload
+.LBB180_14:
+ movl %eax, 24(%edi)
+ movl 104(%esp), %eax # 4-byte Reload
+ jne .LBB180_16
+# BB#15:
+ movl 92(%esp), %ebx # 4-byte Reload
+.LBB180_16:
+ movl %ebx, 28(%edi)
+ jne .LBB180_18
+# BB#17:
+ movl 96(%esp), %esi # 4-byte Reload
+.LBB180_18:
+ movl %esi, 32(%edi)
+ jne .LBB180_20
+# BB#19:
+ movl 100(%esp), %edx # 4-byte Reload
+.LBB180_20:
+ movl %edx, 36(%edi)
+ jne .LBB180_22
+# BB#21:
+ movl 112(%esp), %ecx # 4-byte Reload
+.LBB180_22:
+ movl %ecx, 40(%edi)
+ jne .LBB180_24
+# BB#23:
+ movl 132(%esp), %eax # 4-byte Reload
+.LBB180_24:
+ movl %eax, 44(%edi)
+ addl $828, %esp # imm = 0x33C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end180:
+ .size mcl_fp_montRed12L, .Lfunc_end180-mcl_fp_montRed12L
+
+# -----------------------------------------------------------------------------
+# mcl_fp_addPre12L -- 12-limb (384-bit) addition without modular reduction.
+# NOTE(review): from the offsets (args at 16/20/24(%esp) after three pushes)
+# this appears to be  uint32_t mcl_fp_addPre12L(uint32_t *z, const uint32_t *x,
+# const uint32_t *y)  computing z = x + y limb-by-limb (addl then adcl chain,
+# interleaved with stores) and returning the final carry (0/1) in %eax via
+# sbbl/andl.  Flag ordering is load-bearing: nothing between the adcl's may
+# clobber CF.
+ .globl mcl_fp_addPre12L
+ .align 16, 0x90
+ .type mcl_fp_addPre12L,@function
+mcl_fp_addPre12L: # @mcl_fp_addPre12L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx
+ movl %edx, (%ebx)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ebx)
+ movl 32(%ecx), %esi
+ adcl %edi, %esi
+ movl 36(%eax), %edi
+ movl %edx, 28(%ebx)
+ movl 36(%ecx), %edx
+ adcl %edi, %edx
+ movl 40(%eax), %edi
+ movl %esi, 32(%ebx)
+ movl 40(%ecx), %esi
+ adcl %edi, %esi
+ movl %edx, 36(%ebx)
+ movl %esi, 40(%ebx)
+ movl 44(%eax), %eax
+ movl 44(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 44(%ebx)
+# Materialize the carry-out: sbbl leaves 0 or -1, the andl masks it to 0/1.
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end181:
+ .size mcl_fp_addPre12L, .Lfunc_end181-mcl_fp_addPre12L
+
+# -----------------------------------------------------------------------------
+# mcl_fp_subPre12L -- 12-limb (384-bit) subtraction without modular reduction.
+# NOTE(review): from the offsets (args at 20/24/28(%esp) after four pushes)
+# this appears to be  uint32_t mcl_fp_subPre12L(uint32_t *z, const uint32_t *x,
+# const uint32_t *y)  computing z = x - y via a subl/sbbl chain interleaved
+# with stores, returning the final borrow (0/1) in %eax (%eax is pre-zeroed
+# with xorl, then sbbl $0 / andl $1 capture CF).  Do not reorder: CF is live
+# across the whole chain.
+ .globl mcl_fp_subPre12L
+ .align 16, 0x90
+ .type mcl_fp_subPre12L,@function
+mcl_fp_subPre12L: # @mcl_fp_subPre12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp
+ movl %esi, (%ebp)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ebp)
+ movl 32(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 36(%edx), %ebx
+ movl %esi, 28(%ebp)
+ movl 36(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 40(%edx), %ebx
+ movl %edi, 32(%ebp)
+ movl 40(%ecx), %edi
+ sbbl %ebx, %edi
+ movl %esi, 36(%ebp)
+ movl %edi, 40(%ebp)
+ movl 44(%edx), %edx
+ movl 44(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 44(%ebp)
+# %eax was zeroed before the chain; sbbl $0 folds in the borrow, andl masks to 0/1.
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end182:
+ .size mcl_fp_subPre12L, .Lfunc_end182-mcl_fp_subPre12L
+
+ .globl mcl_fp_shr1_12L
+ .align 16, 0x90
+ .type mcl_fp_shr1_12L,@function
+# mcl_fp_shr1_12L(z, x) -- logical right shift of a 12-limb (384-bit)
+# little-endian value by one bit.
+# i386 cdecl; after the single push below: 8(%esp) = z, 12(%esp) = x.
+# Each limb is produced with shrdl, which shifts the low limb right by 1
+# while pulling in the low bit of the next-higher limb; the top limb is
+# finished with a plain shrl (zero fill).
+mcl_fp_shr1_12L: # @mcl_fp_shr1_12L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl 8(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 40(%ecx)
+# top limb: no higher limb to borrow bits from, so shift in a zero.
+ shrl %eax
+ movl %eax, 44(%ecx)
+ popl %esi
+ retl
+.Lfunc_end183:
+ .size mcl_fp_shr1_12L, .Lfunc_end183-mcl_fp_shr1_12L
+
+ .globl mcl_fp_add12L
+ .align 16, 0x90
+ .type mcl_fp_add12L,@function
+# mcl_fp_add12L(z, x, y, p) -- 12-limb modular addition.
+# i386 cdecl; after 4 pushes + subl $36 the stack arguments sit at:
+#   56(%esp) = z, 60(%esp) = x, 64(%esp) = y, 68(%esp) = p (modulus)
+# Strategy: compute s = x + y and store it to z unconditionally, then
+# compute s - p into spill slots/registers; if that subtraction does not
+# borrow (and the add produced no carry), overwrite z with the reduced
+# value. Presumably callers guarantee x,y < p so one subtraction
+# suffices -- cannot be confirmed from this file alone.
+mcl_fp_add12L: # @mcl_fp_add12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $36, %esp
+ movl 64(%esp), %ebx
+ movl (%ebx), %edx
+ movl 4(%ebx), %ecx
+ movl 60(%esp), %eax
+# s = x + y, low limbs spilled to the local frame, high limbs kept in
+# %ebp/%edi/%esi/%edx for the later conditional store.
+ addl (%eax), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl 4(%eax), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 8(%ebx), %ecx
+ adcl 8(%eax), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 12(%eax), %edx
+ movl 16(%eax), %ecx
+ adcl 12(%ebx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 16(%ebx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 20(%eax), %ecx
+ adcl 20(%ebx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 24(%eax), %ecx
+ adcl 24(%ebx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 28(%eax), %ecx
+ adcl 28(%ebx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 32(%eax), %ebp
+ adcl 32(%ebx), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ movl 36(%eax), %edi
+ adcl 36(%ebx), %edi
+ movl 40(%eax), %esi
+ adcl 40(%ebx), %esi
+ movl 44(%eax), %edx
+ adcl 44(%ebx), %edx
+# unconditionally store the raw sum to z.
+ movl 56(%esp), %ebx
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, (%ebx)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%ebx)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%ebx)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%ebx)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 16(%ebx)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 20(%ebx)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%ebx)
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%ebx)
+ movl %ebp, 32(%ebx)
+ movl %edi, 36(%ebx)
+ movl %esi, 40(%ebx)
+ movl %edx, 44(%ebx)
+# capture the add's carry-out in %ecx (0/1).
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+# compute s - p; the borrow chain folds into %ecx at the end.
+ movl 68(%esp), %ebp
+ subl (%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ sbbl 4(%ebp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ sbbl 8(%ebp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ sbbl 12(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ sbbl 16(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ sbbl 20(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ sbbl 24(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %eax # 4-byte Reload
+ sbbl 28(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl (%esp), %eax # 4-byte Reload
+ sbbl 32(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 36(%ebp), %edi
+ sbbl 40(%ebp), %esi
+ sbbl 44(%ebp), %edx
+# %ecx becomes nonzero iff (s - p) borrowed past the saved carry,
+# i.e. s < p: in that case keep the unreduced sum already in z.
+ sbbl $0, %ecx
+ testb $1, %cl
+ jne .LBB184_2
+# BB#1: # %nocarry
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, (%ebx)
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%ebx)
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%ebx)
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%ebx)
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%ebx)
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%ebx)
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%ebx)
+ movl (%esp), %eax # 4-byte Reload
+ movl %eax, 32(%ebx)
+ movl %edi, 36(%ebx)
+ movl %esi, 40(%ebx)
+ movl %edx, 44(%ebx)
+.LBB184_2: # %carry
+ addl $36, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end184:
+ .size mcl_fp_add12L, .Lfunc_end184-mcl_fp_add12L
+
+ .globl mcl_fp_addNF12L
+ .align 16, 0x90
+ .type mcl_fp_addNF12L,@function
+# mcl_fp_addNF12L(z, x, y, p) -- 12-limb modular addition, "NF" variant.
+# i386 cdecl; after 4 pushes + subl $88 the stack arguments sit at:
+#   108(%esp) = z, 112(%esp) = x, 116(%esp) = y, 120(%esp) = p
+# Strategy: compute s = x + y (no carry-out is kept; presumably the
+# caller guarantees the sum fits 12 limbs -- cannot be confirmed here),
+# then d = s - p into spill slots, and per-limb select s or d based on
+# the sign bit of d's top limb (sarl $31): if d went negative (js taken)
+# keep s, otherwise store the reduced value d.
+mcl_fp_addNF12L: # @mcl_fp_addNF12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl 116(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ movl 112(%esp), %edx
+ addl (%edx), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 4(%edx), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+# preload y's high limbs into spill slots before the adcl chain resumes.
+ movl 44(%esi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 40(%esi), %ebp
+ movl 36(%esi), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 32(%esi), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 20(%esi), %ebx
+ movl 16(%esi), %edi
+ movl 12(%esi), %ecx
+ movl 8(%esi), %eax
+ adcl 8(%edx), %eax
+ adcl 12(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 16(%edx), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ adcl 20(%edx), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 24(%edx), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 28(%edx), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 32(%edx), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 36(%edx), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl %eax, %esi
+ adcl 40(%edx), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 44(%edx), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+# d = s - p, each limb spilled to 0..40(%esp).
+ movl 120(%esp), %ebp
+ movl 60(%esp), %edx # 4-byte Reload
+ subl (%ebp), %edx
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 4(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl %esi, %eax
+ sbbl 8(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 16(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ sbbl 20(%ebp), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ movl %eax, %ecx
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 36(%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edi
+ sbbl 40(%ebp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ sbbl 44(%ebp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+# sign of d's top limb decides which value is stored; SF from testl is
+# reused by all the js branches below (no intervening flag writers
+# other than mov/reload, which preserve flags).
+ movl %edi, %ebp
+ movl 60(%esp), %edi # 4-byte Reload
+ sarl $31, %ebp
+ testl %ebp, %ebp
+ js .LBB185_2
+# BB#1:
+ movl %edx, %edi
+.LBB185_2:
+ movl 108(%esp), %edx
+ movl %edi, (%edx)
+ movl 64(%esp), %edi # 4-byte Reload
+ js .LBB185_4
+# BB#3:
+ movl (%esp), %edi # 4-byte Reload
+.LBB185_4:
+ movl %edi, 4(%edx)
+ movl %eax, %ebp
+ js .LBB185_6
+# BB#5:
+ movl 4(%esp), %esi # 4-byte Reload
+.LBB185_6:
+ movl %esi, 8(%edx)
+ movl %ecx, %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ movl 48(%esp), %ecx # 4-byte Reload
+ js .LBB185_8
+# BB#7:
+ movl 8(%esp), %ecx # 4-byte Reload
+.LBB185_8:
+ movl %ecx, 12(%edx)
+ movl 76(%esp), %ebx # 4-byte Reload
+ movl 84(%esp), %edi # 4-byte Reload
+ js .LBB185_10
+# BB#9:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB185_10:
+ movl %eax, 16(%edx)
+ movl 80(%esp), %ecx # 4-byte Reload
+ js .LBB185_12
+# BB#11:
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+.LBB185_12:
+ movl 56(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%edx)
+ js .LBB185_14
+# BB#13:
+ movl 20(%esp), %ebp # 4-byte Reload
+.LBB185_14:
+ movl %ebp, 24(%edx)
+ js .LBB185_16
+# BB#15:
+ movl 24(%esp), %edi # 4-byte Reload
+.LBB185_16:
+ movl %edi, 28(%edx)
+ js .LBB185_18
+# BB#17:
+ movl 28(%esp), %ebx # 4-byte Reload
+.LBB185_18:
+ movl %ebx, 32(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB185_20
+# BB#19:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB185_20:
+ movl %eax, 36(%edx)
+ js .LBB185_22
+# BB#21:
+ movl 36(%esp), %esi # 4-byte Reload
+.LBB185_22:
+ movl %esi, 40(%edx)
+ js .LBB185_24
+# BB#23:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB185_24:
+ movl %ecx, 44(%edx)
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end185:
+ .size mcl_fp_addNF12L, .Lfunc_end185-mcl_fp_addNF12L
+
+ .globl mcl_fp_sub12L
+ .align 16, 0x90
+ .type mcl_fp_sub12L,@function
+# mcl_fp_sub12L(z, x, y, p) -- 12-limb modular subtraction.
+# i386 cdecl; after 4 pushes + subl $40 the stack arguments sit at:
+#   60(%esp) = z, 64(%esp) = x, 68(%esp) = y, 72(%esp) = p
+# Strategy: compute d = x - y and store it to z unconditionally; if the
+# subtraction borrowed (d went negative), add the modulus p back into z.
+mcl_fp_sub12L: # @mcl_fp_sub12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 64(%esp), %esi
+ movl (%esi), %ecx
+ movl 4(%esi), %eax
+# %ebx collects the final borrow: zeroed here, sbbl $0 below makes it
+# -1 iff the 12-limb subtraction borrowed.
+ xorl %ebx, %ebx
+ movl 68(%esp), %edi
+ subl (%edi), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 28(%esi), %edx
+ sbbl 28(%edi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 32(%esi), %ecx
+ sbbl 32(%edi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 36(%esi), %eax
+ sbbl 36(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 40(%esi), %ebp
+ sbbl 40(%edi), %ebp
+ movl 44(%esi), %esi
+ sbbl 44(%edi), %esi
+ sbbl $0, %ebx
+# testb sets ZF from the borrow bit; the je below consumes it after the
+# stores (mov does not clobber flags).
+ testb $1, %bl
+ movl 60(%esp), %ebx
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl %edx, 28(%ebx)
+ movl %ecx, 32(%ebx)
+ movl %eax, 36(%ebx)
+ movl %ebp, 40(%ebx)
+ movl %esi, 44(%ebx)
+ je .LBB186_2
+# BB#1: # %carry
+# borrow occurred: z += p, rewriting all 12 limbs of z.
+ movl %esi, %edi
+ movl 72(%esp), %esi
+ movl 12(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 24(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 8(%esi), %ecx
+ movl 12(%esi), %eax
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 16(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl %eax, 36(%ebx)
+ movl 40(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 40(%ebx)
+ movl 44(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 44(%ebx)
+.LBB186_2: # %nocarry
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end186:
+ .size mcl_fp_sub12L, .Lfunc_end186-mcl_fp_sub12L
+
+ .globl mcl_fp_subNF12L
+ .align 16, 0x90
+ .type mcl_fp_subNF12L,@function
+# mcl_fp_subNF12L(z, x, y, p) -- 12-limb modular subtraction, "NF"
+# (branch-free) variant.
+# i386 cdecl; after 4 pushes + subl $72 the stack arguments sit at:
+#   92(%esp) = z, 96(%esp) = x, 100(%esp) = y, 104(%esp) = p
+# Strategy: compute d = x - y, build an all-ones/all-zeros mask from the
+# sign of d's top limb (sarl $31), AND every limb of p with that mask,
+# and add the masked modulus to d: z = d + (d < 0 ? p : 0), without a
+# data-dependent branch.
+mcl_fp_subNF12L: # @mcl_fp_subNF12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $72, %esp
+ movl 96(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %eax
+ movl 100(%esp), %edi
+ subl (%edi), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 28(%ecx), %ebp
+ movl 24(%ecx), %ebx
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+# %eax = sign-extended top limb: all ones if d < 0, else zero.
+ movl %ecx, %eax
+ sarl $31, %eax
+# the add/adc/shr/or sequence below reconstructs the same mask into
+# %edx/%edi/%ebp/%esi for limbs 0-3 without re-reading %eax later.
+ movl %eax, %edx
+ addl %edx, %edx
+ movl %eax, %edi
+ adcl %edi, %edi
+ movl %eax, %ebp
+ adcl %ebp, %ebp
+ movl %eax, %esi
+ adcl %esi, %esi
+ shrl $31, %ecx
+ orl %edx, %ecx
+# mask each limb of p; limbs 4-11 use %eax directly.
+ movl 104(%esp), %edx
+ andl 12(%edx), %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ andl 8(%edx), %ebp
+ andl 4(%edx), %edi
+ andl (%edx), %ecx
+ movl 44(%edx), %esi
+ andl %eax, %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 40(%edx), %esi
+ andl %eax, %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 36(%edx), %esi
+ andl %eax, %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 32(%edx), %esi
+ andl %eax, %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 28(%edx), %esi
+ andl %eax, %esi
+ movl %esi, (%esp) # 4-byte Spill
+ movl 24(%edx), %ebx
+ andl %eax, %ebx
+ movl 20(%edx), %esi
+ andl %eax, %esi
+ andl 16(%edx), %eax
+# z = d + masked p, with the usual adcl carry chain.
+ addl 48(%esp), %ecx # 4-byte Folded Reload
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl 92(%esp), %edx
+ movl %ecx, (%edx)
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, 4(%edx)
+ movl 8(%esp), %ecx # 4-byte Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ebp, 8(%edx)
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 12(%edx)
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %eax, 16(%edx)
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 20(%edx)
+ movl (%esp), %ecx # 4-byte Reload
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %ebx, 24(%edx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 28(%edx)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 64(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 32(%edx)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 36(%edx)
+ movl %eax, 40(%edx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%edx)
+ addl $72, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end187:
+ .size mcl_fp_subNF12L, .Lfunc_end187-mcl_fp_subNF12L
+
+ .globl mcl_fpDbl_add12L
+ .align 16, 0x90
+ .type mcl_fpDbl_add12L,@function
+# mcl_fpDbl_add12L(z, x, y, p) -- double-width (24-limb, 768-bit) add
+# with modular reduction applied to the high half only.
+# i386 cdecl; after 4 pushes + subl $88 the stack arguments sit at:
+#   108(%esp) = z (24 limbs), 112(%esp) = x, 116(%esp) = y, 120(%esp) = p
+# The low 12 limbs of x+y are stored to z directly; the high 12 limbs
+# are reduced conditionally: h - p is computed, and h or h - p is stored
+# depending on the final borrow.
+mcl_fpDbl_add12L: # @mcl_fpDbl_add12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl 116(%esp), %ecx
+ movl 112(%esp), %edi
+ movl 12(%edi), %esi
+ movl 16(%edi), %edx
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+# low half: add and store straight through to z[0..11].
+ addl (%edi), %ebp
+ movl 108(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%edi), %ebp
+ adcl 8(%edi), %ebx
+ adcl 12(%ecx), %esi
+ adcl 16(%ecx), %edx
+ movl %ebp, 4(%eax)
+ movl 56(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %esi, 12(%eax)
+ movl 20(%edi), %esi
+ adcl %ebx, %esi
+ movl 24(%ecx), %ebx
+ movl %edx, 16(%eax)
+ movl 24(%edi), %edx
+ adcl %ebx, %edx
+ movl 28(%ecx), %ebx
+ movl %esi, 20(%eax)
+ movl 28(%edi), %esi
+ adcl %ebx, %esi
+ movl 32(%ecx), %ebx
+ movl %edx, 24(%eax)
+ movl 32(%edi), %edx
+ adcl %ebx, %edx
+ movl 36(%ecx), %ebx
+ movl %esi, 28(%eax)
+ movl 36(%edi), %esi
+ adcl %ebx, %esi
+ movl 40(%ecx), %ebx
+ movl %edx, 32(%eax)
+ movl 40(%edi), %edx
+ adcl %ebx, %edx
+ movl 44(%ecx), %ebx
+ movl %esi, 36(%eax)
+ movl 44(%edi), %esi
+ adcl %ebx, %esi
+# high half: keep the sum limbs in spill slots/registers for the
+# conditional reduction below.
+ movl 48(%ecx), %ebx
+ movl %edx, 40(%eax)
+ movl 48(%edi), %edx
+ adcl %ebx, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 52(%ecx), %ebx
+ movl %esi, 44(%eax)
+ movl 52(%edi), %eax
+ adcl %ebx, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 56(%edi), %eax
+ adcl %ebp, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%ecx), %eax
+ movl 60(%edi), %edx
+ adcl %eax, %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl 64(%edi), %edx
+ adcl %eax, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 68(%ecx), %eax
+ movl 68(%edi), %edx
+ adcl %eax, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 72(%ecx), %eax
+ movl 72(%edi), %edx
+ adcl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 76(%ecx), %eax
+ movl 76(%edi), %edx
+ adcl %eax, %edx
+ movl 80(%ecx), %esi
+ movl 80(%edi), %eax
+ adcl %esi, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 84(%ecx), %ebx
+ movl 84(%edi), %esi
+ adcl %ebx, %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 88(%ecx), %ebp
+ movl 88(%edi), %ebx
+ adcl %ebp, %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 92(%ecx), %ecx
+ movl 92(%edi), %edi
+ adcl %ecx, %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+# capture the carry-out of the 24-limb add in %ecx (0/1).
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+# compute (high half) - p into spill slots.
+ movl 120(%esp), %ebp
+ movl 72(%esp), %edi # 4-byte Reload
+ subl (%ebp), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ sbbl 4(%ebp), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ sbbl 8(%ebp), %edi
+ movl %edi, 20(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ sbbl 12(%ebp), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ sbbl 16(%ebp), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 20(%ebp), %edi
+ movl %edi, 8(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ sbbl 24(%ebp), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl %edx, %edi
+ sbbl 28(%ebp), %edi
+ movl %edi, (%esp) # 4-byte Spill
+ sbbl 32(%ebp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ sbbl 36(%ebp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl 44(%esp), %ebx # 4-byte Reload
+ sbbl 40(%ebp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %edi
+ sbbl 44(%ebp), %edi
+# %ecx nonzero iff h < p (borrow past the saved carry): keep h,
+# otherwise select the reduced h - p limbs.
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB188_2
+# BB#1:
+ movl %edi, %ebx
+.LBB188_2:
+ testb %cl, %cl
+ movl 72(%esp), %ecx # 4-byte Reload
+ movl 68(%esp), %esi # 4-byte Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB188_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB188_4:
+# store the selected high half to z[12..23].
+ movl 108(%esp), %eax
+ movl %ecx, 48(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ movl 84(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ movl %ebp, 64(%eax)
+ movl %edi, 68(%eax)
+ movl %esi, 72(%eax)
+ movl %edx, 76(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl 48(%esp), %edx # 4-byte Reload
+ jne .LBB188_6
+# BB#5:
+ movl 32(%esp), %edx # 4-byte Reload
+.LBB188_6:
+ movl %edx, 80(%eax)
+ movl 52(%esp), %edx # 4-byte Reload
+ jne .LBB188_8
+# BB#7:
+ movl 36(%esp), %edx # 4-byte Reload
+.LBB188_8:
+ movl %edx, 84(%eax)
+ jne .LBB188_10
+# BB#9:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB188_10:
+ movl %ecx, 88(%eax)
+ movl %ebx, 92(%eax)
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end188:
+ .size mcl_fpDbl_add12L, .Lfunc_end188-mcl_fpDbl_add12L
+
+ .globl mcl_fpDbl_sub12L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub12L,@function
+# mcl_fpDbl_sub12L(z, x, y, p) -- double-width (24-limb, 768-bit)
+# subtraction with the modulus added back to the high half on borrow.
+# i386 cdecl; after 4 pushes + subl $76 the stack arguments sit at:
+#   96(%esp) = z (24 limbs), 100(%esp) = x, 104(%esp) = y, 108(%esp) = p
+# The low 12 limbs of x-y are stored to z directly; for the high 12
+# limbs, each limb of p (or 0, if there was no final borrow) is selected
+# with a branch chain and then added to the high-half difference.
+mcl_fpDbl_sub12L: # @mcl_fpDbl_sub12L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $76, %esp
+ movl 100(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %edx
+ movl 104(%esp), %ebx
+# low half: subtract and store straight through to z[0..11].
+ subl (%ebx), %eax
+ sbbl 4(%ebx), %edx
+ movl 8(%esi), %edi
+ sbbl 8(%ebx), %edi
+ movl 96(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%esi), %eax
+ sbbl 12(%ebx), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%esi), %edx
+ sbbl 16(%ebx), %edx
+ movl %edi, 8(%ecx)
+ movl 20(%ebx), %edi
+ movl %eax, 12(%ecx)
+ movl 20(%esi), %eax
+ sbbl %edi, %eax
+ movl 24(%ebx), %edi
+ movl %edx, 16(%ecx)
+ movl 24(%esi), %edx
+ sbbl %edi, %edx
+ movl 28(%ebx), %edi
+ movl %eax, 20(%ecx)
+ movl 28(%esi), %eax
+ sbbl %edi, %eax
+ movl 32(%ebx), %edi
+ movl %edx, 24(%ecx)
+ movl 32(%esi), %edx
+ sbbl %edi, %edx
+ movl 36(%ebx), %edi
+ movl %eax, 28(%ecx)
+ movl 36(%esi), %eax
+ sbbl %edi, %eax
+ movl 40(%ebx), %edi
+ movl %edx, 32(%ecx)
+ movl 40(%esi), %edx
+ sbbl %edi, %edx
+ movl 44(%ebx), %edi
+ movl %eax, 36(%ecx)
+ movl 44(%esi), %eax
+ sbbl %edi, %eax
+# high half: differences go to spill slots, to be fixed up below.
+ movl 48(%ebx), %edi
+ movl %edx, 40(%ecx)
+ movl 48(%esi), %edx
+ sbbl %edi, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 52(%ebx), %edx
+ movl %eax, 44(%ecx)
+ movl 52(%esi), %eax
+ sbbl %edx, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 56(%ebx), %eax
+ movl 56(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 60(%ebx), %eax
+ movl 60(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 64(%ebx), %eax
+ movl 64(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 68(%ebx), %eax
+ movl 68(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 72(%ebx), %eax
+ movl 72(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 76(%ebx), %eax
+ movl 76(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 80(%ebx), %eax
+ movl 80(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 84(%ebx), %eax
+ movl 84(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 88(%ebx), %eax
+ movl 88(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 92(%ebx), %eax
+ movl 92(%esi), %edx
+ sbbl %eax, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+# %eax = final borrow as 0/1; each block below picks p[i] when it is 1,
+# 0 when it is 0 (ZF from "testb %al,%al" survives the mov-only blocks).
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 108(%esp), %ebp
+ jne .LBB189_1
+# BB#2:
+ movl $0, 36(%esp) # 4-byte Folded Spill
+ jmp .LBB189_3
+.LBB189_1:
+ movl 44(%ebp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+.LBB189_3:
+ testb %al, %al
+ jne .LBB189_4
+# BB#5:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ movl $0, %esi
+ jmp .LBB189_6
+.LBB189_4:
+ movl (%ebp), %esi
+ movl 4(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB189_6:
+ jne .LBB189_7
+# BB#8:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB189_9
+.LBB189_7:
+ movl 40(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB189_9:
+ jne .LBB189_10
+# BB#11:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB189_12
+.LBB189_10:
+ movl 36(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB189_12:
+ jne .LBB189_13
+# BB#14:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB189_15
+.LBB189_13:
+ movl 32(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB189_15:
+ jne .LBB189_16
+# BB#17:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB189_18
+.LBB189_16:
+ movl 28(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB189_18:
+ jne .LBB189_19
+# BB#20:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB189_21
+.LBB189_19:
+ movl 24(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB189_21:
+ jne .LBB189_22
+# BB#23:
+ movl $0, %ebx
+ jmp .LBB189_24
+.LBB189_22:
+ movl 20(%ebp), %ebx
+.LBB189_24:
+ jne .LBB189_25
+# BB#26:
+ movl $0, %eax
+ jmp .LBB189_27
+.LBB189_25:
+ movl 16(%ebp), %eax
+.LBB189_27:
+ jne .LBB189_28
+# BB#29:
+ movl %ebp, %edx
+ movl $0, %ebp
+ jmp .LBB189_30
+.LBB189_28:
+ movl %ebp, %edx
+ movl 12(%edx), %ebp
+.LBB189_30:
+ jne .LBB189_31
+# BB#32:
+ xorl %edx, %edx
+ jmp .LBB189_33
+.LBB189_31:
+ movl 8(%edx), %edx
+.LBB189_33:
+# z[12..23] = (high-half difference) + (p or 0), one adcl chain.
+ addl 32(%esp), %esi # 4-byte Folded Reload
+ movl 12(%esp), %edi # 4-byte Reload
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 48(%ecx)
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edi, 52(%ecx)
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %edx, 56(%ecx)
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 60(%ecx)
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 68(%ecx)
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 72(%ecx)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 76(%ecx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 80(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 84(%ecx)
+ movl %eax, 88(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%ecx)
+ addl $76, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end189:
+ .size mcl_fpDbl_sub12L, .Lfunc_end189-mcl_fpDbl_sub12L
+
+ .align 16, 0x90
+ .type .LmulPv416x32,@function
+# .LmulPv416x32 -- local helper: multiply a 13-limb (416-bit) value by a
+# 32-bit scalar, producing a 14-limb result.
+# Internal (non-C) calling convention used by the callers in this file:
+#   %edx = pointer to the 13-limb source (copied to %edi below)
+#   %ecx = pointer to the 14-limb destination
+#   first stack argument (108(%esp) after 4 pushes + subl $88) = scalar
+# Returns the destination pointer in %eax.
+# Implementation: 13 one-limb mull products are computed high-to-low
+# into spill slots, then combined low-to-high with an addl/adcl chain;
+# the last high word plus carry becomes limb 13.
+.LmulPv416x32: # @mulPv416x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl %edx, %edi
+ movl 108(%esp), %ebp
+# each mull leaves the 64-bit product in %edx:%eax; both halves are
+# spilled because %edx/%eax are overwritten by the next mull.
+ movl %ebp, %eax
+ mull 48(%edi)
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 44(%edi)
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 40(%edi)
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 36(%edi)
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 32(%edi)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 28(%edi)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 24(%edi)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 20(%edi)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 16(%edi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 12(%edi)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 8(%edi)
+ movl %edx, %esi
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull 4(%edi)
+ movl %edx, %ebx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebp, %eax
+ mull (%edi)
+ movl %eax, (%ecx)
+# fold each product's high word into the next product's low word.
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%ecx)
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ecx)
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%ecx)
+# limb 13 = top product's high word + final carry.
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 52(%ecx)
+ movl %ecx, %eax
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end190:
+ .size .LmulPv416x32, .Lfunc_end190-.LmulPv416x32
+
+ .globl mcl_fp_mulUnitPre13L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre13L,@function
+# mcl_fp_mulUnitPre13L(z, x, u) -- multiply a 13-limb (416-bit) value x
+# by the 32-bit scalar u, writing the 14-limb product to z.
+# i386 cdecl; after 4 pushes + subl $108 the stack arguments sit at:
+#   128(%esp) = z (14 limbs), 132(%esp) = x, 136(%esp) = u
+# Thin PIC wrapper: sets up %ebx as the GOT pointer, calls the local
+# .LmulPv416x32 helper into a 56-byte stack buffer at 48(%esp), then
+# copies the 14 result limbs out to z.
+mcl_fp_mulUnitPre13L: # @mcl_fp_mulUnitPre13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $108, %esp
+# call/pop idiom to obtain the current address for GOT-relative PIC.
+ calll .L191$pb
+.L191$pb:
+ popl %ebx
+.Ltmp32:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp32-.L191$pb), %ebx
+# helper convention: scalar on the stack, %ecx = result buffer,
+# %edx = source limbs.
+ movl 136(%esp), %eax
+ movl %eax, (%esp)
+ leal 48(%esp), %ecx
+ movl 132(%esp), %edx
+ calll .LmulPv416x32
+# copy the 14-limb product from the local buffer to z.
+ movl 100(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 72(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp
+ movl 64(%esp), %ebx
+ movl 60(%esp), %edi
+ movl 56(%esp), %esi
+ movl 48(%esp), %edx
+ movl 52(%esp), %ecx
+ movl 128(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ addl $108, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end191:
+ .size mcl_fp_mulUnitPre13L, .Lfunc_end191-mcl_fp_mulUnitPre13L
+
+ .globl mcl_fpDbl_mulPre13L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre13L,@function
+mcl_fpDbl_mulPre13L: # @mcl_fpDbl_mulPre13L
+# BB#0:
+# Schoolbook 13-limb x 13-limb multiprecision multiply (32-bit x86, PIC,
+# compiler-generated). Presumably void mcl_fpDbl_mulPre13L(r, x, y) with
+# r = x * y, r holding 26 32-bit limbs -- TODO confirm against mcl's C
+# prototype; only the register/stack usage below is visible here.
+# After the 4 pushes and the 844-byte frame, the cdecl args sit at:
+#   864(%esp) = r (result; limbs stored at 0..100 off this pointer),
+#   868(%esp) = x (passed to .LmulPv416x32 in %edx),
+#   872(%esp) = y (one 32-bit word y[k] read per round).
+# Round k = 0..12 calls .LmulPv416x32 with %ecx = 14-word stack output
+# buffer, %edx = x, (%esp) = y[k], %ebx = GOT pointer; it then folds the
+# 14-word partial product into the running sum via an adcl carry chain
+# (the trailing "adcl $0" captures the carry out of the chain into the
+# spilled top word) and stores limb k of the result, which is final.
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $844, %esp # imm = 0x34C
+ # call/pop idiom to obtain EIP, then add the displacement to reach the
+ # GOT (standard i386 PIC setup); GOT pointer is kept spilled at 108(%esp)
+ calll .L192$pb
+.L192$pb:
+ popl %edi
+.Ltmp33:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp33-.L192$pb), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ # round 0: partial product x * y[0] into 784(%esp)..836(%esp)
+ movl 872(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl 868(%esp), %edx
+ movl %edx, %esi
+ movl %edi, %ebx
+ calll .LmulPv416x32
+ movl 836(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 828(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 804(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 800(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 796(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 792(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 784(%esp), %eax
+ movl 788(%esp), %ebp
+ # limb 0 of the result is final after round 0; store it
+ movl 864(%esp), %ecx
+ movl %eax, (%ecx)
+ # round 1: x * y[1] into 728(%esp); add into the previous partial sum
+ movl 872(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 728(%esp), %ecx
+ movl %esi, %edx
+ movl %edi, %ebx
+ calll .LmulPv416x32
+ addl 728(%esp), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 780(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 776(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 772(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 764(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 760(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 748(%esp), %edi
+ movl 744(%esp), %esi
+ movl 740(%esp), %edx
+ movl 732(%esp), %eax
+ movl 736(%esp), %ecx
+ movl 864(%esp), %ebp
+ movl 24(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ # round 2: x * y[2] into 672(%esp)
+ movl 872(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 672(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 724(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 720(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 716(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 712(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 708(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 704(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 700(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 696(%esp), %ebx
+ movl 692(%esp), %edi
+ movl 688(%esp), %esi
+ movl 684(%esp), %edx
+ movl 676(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 680(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 60(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ # round 3: x * y[3] into 616(%esp)
+ movl 872(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 616(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 616(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 660(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 656(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 652(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 648(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 644(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 640(%esp), %ebx
+ movl 636(%esp), %edi
+ movl 632(%esp), %esi
+ movl 628(%esp), %edx
+ movl 620(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 624(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ # round 4: x * y[4] into 560(%esp)
+ movl 872(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 560(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 588(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 584(%esp), %ebx
+ movl 580(%esp), %edi
+ movl 576(%esp), %esi
+ movl 572(%esp), %edx
+ movl 564(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 568(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ # round 5: x * y[5] into 504(%esp)
+ movl 872(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 504(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 532(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 528(%esp), %ebx
+ movl 524(%esp), %edi
+ movl 520(%esp), %esi
+ movl 516(%esp), %edx
+ movl 508(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 512(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ # round 6: x * y[6] into 448(%esp)
+ movl 872(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 448(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 448(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 492(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 472(%esp), %ebp
+ movl 468(%esp), %edi
+ movl 464(%esp), %esi
+ movl 460(%esp), %edx
+ movl 452(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 456(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%eax)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ # round 7: x * y[7] into 392(%esp)
+ movl 872(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 392(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 392(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 444(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 440(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 436(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 428(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 424(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 416(%esp), %ebx
+ movl 412(%esp), %edi
+ movl 408(%esp), %esi
+ movl 404(%esp), %edx
+ movl 396(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 400(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ # round 8: x * y[8] into 336(%esp)
+ movl 872(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 336(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 336(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 360(%esp), %ebp
+ movl 356(%esp), %edi
+ movl 352(%esp), %esi
+ movl 348(%esp), %edx
+ movl 340(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 344(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 60(%esp), %ebx # 4-byte Reload
+ movl %ebx, 32(%eax)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ # round 9: x * y[9] into 280(%esp)
+ movl 872(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 280(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 316(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 304(%esp), %ebx
+ movl 300(%esp), %edi
+ movl 296(%esp), %esi
+ movl 292(%esp), %edx
+ movl 284(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 288(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ # round 10: x * y[10] into 224(%esp)
+ movl 872(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 224(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 272(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 248(%esp), %ebx
+ movl 244(%esp), %edi
+ movl 240(%esp), %esi
+ movl 236(%esp), %edx
+ movl 228(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 232(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ # round 11: x * y[11] into 168(%esp)
+ movl 872(%esp), %edi
+ movl 44(%edi), %eax
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 868(%esp), %eax
+ movl %eax, %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 60(%esp), %esi # 4-byte Reload
+ addl 168(%esp), %esi
+ movl 220(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 204(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 200(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 196(%esp), %ebp
+ movl 192(%esp), %ebx
+ movl 188(%esp), %edi
+ movl 184(%esp), %edx
+ movl 180(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 176(%esp), %ecx
+ movl 864(%esp), %eax
+ movl %esi, 44(%eax)
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 104(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ # round 12 (last): x * y[12] into 112(%esp)
+ movl 872(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 868(%esp), %edx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ # fold in the final partial product and store limbs 12..25 of the
+ # result (offsets 48..100 off the result pointer)
+ addl 112(%esp), %esi
+ movl %esi, %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 120(%esp), %edi
+ movl 164(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 156(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 152(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 148(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 144(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 140(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 136(%esp), %ebx
+ movl 132(%esp), %esi
+ movl 128(%esp), %edx
+ movl 124(%esp), %ecx
+ movl 864(%esp), %eax
+ movl %ebp, 48(%eax)
+ movl 68(%esp), %ebp # 4-byte Reload
+ movl %ebp, 52(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 56(%eax)
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 60(%eax)
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 64(%eax)
+ adcl 104(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 68(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ebx, 72(%eax)
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 76(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 80(%eax)
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 84(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 100(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 88(%eax)
+ movl %ecx, 92(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 96(%eax)
+ # top limb: carry-in only; no further addend
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 100(%eax)
+ addl $844, %esp # imm = 0x34C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end192:
+ .size mcl_fpDbl_mulPre13L, .Lfunc_end192-mcl_fpDbl_mulPre13L
+
+ .globl mcl_fpDbl_sqrPre13L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre13L,@function
+mcl_fpDbl_sqrPre13L: # @mcl_fpDbl_sqrPre13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $844, %esp # imm = 0x34C
+ calll .L193$pb
+.L193$pb:
+ popl %ebx
+.Ltmp34:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp34-.L193$pb), %ebx
+ movl %ebx, 108(%esp) # 4-byte Spill
+ movl 868(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl %edx, %edi
+ movl %ebx, %esi
+ calll .LmulPv416x32
+ movl 836(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 828(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 804(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 800(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 796(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 792(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 784(%esp), %eax
+ movl 788(%esp), %ebp
+ movl 864(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edi, %edx
+ movl 4(%edx), %eax
+ movl %eax, (%esp)
+ leal 728(%esp), %ecx
+ movl %esi, %ebx
+ calll .LmulPv416x32
+ addl 728(%esp), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 780(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 776(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 772(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 764(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 760(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 748(%esp), %edi
+ movl 744(%esp), %esi
+ movl 740(%esp), %edx
+ movl 732(%esp), %eax
+ movl 736(%esp), %ecx
+ movl 864(%esp), %ebp
+ movl 24(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 672(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 724(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 720(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 716(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 712(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 708(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 704(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 700(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 696(%esp), %ebx
+ movl 692(%esp), %edi
+ movl 688(%esp), %esi
+ movl 684(%esp), %edx
+ movl 676(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 680(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 60(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 616(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 616(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 660(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 656(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 652(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 648(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 644(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 640(%esp), %ebx
+ movl 636(%esp), %edi
+ movl 632(%esp), %esi
+ movl 628(%esp), %edx
+ movl 620(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 624(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 560(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 588(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 584(%esp), %ebx
+ movl 580(%esp), %edi
+ movl 576(%esp), %esi
+ movl 572(%esp), %edx
+ movl 564(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 568(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 504(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 532(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 528(%esp), %ebx
+ movl 524(%esp), %edi
+ movl 520(%esp), %esi
+ movl 516(%esp), %edx
+ movl 508(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 512(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 16(%esp), %ebp # 4-byte Folded Reload
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 448(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 448(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 492(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 472(%esp), %ebp
+ movl 468(%esp), %edi
+ movl 464(%esp), %esi
+ movl 460(%esp), %edx
+ movl 452(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 456(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 56(%esp), %ebx # 4-byte Reload
+ movl %ebx, 24(%eax)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 392(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 392(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 444(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 440(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 436(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 428(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 424(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 416(%esp), %ebx
+ movl 412(%esp), %edi
+ movl 408(%esp), %esi
+ movl 404(%esp), %edx
+ movl 396(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 400(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 36(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 336(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 336(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 360(%esp), %ebp
+ movl 356(%esp), %edi
+ movl 352(%esp), %esi
+ movl 348(%esp), %edx
+ movl 340(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 344(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 60(%esp), %ebx # 4-byte Reload
+ movl %ebx, 32(%eax)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl %eax, 104(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 36(%edx), %eax
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 104(%esp), %eax # 4-byte Reload
+ addl 280(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 316(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 304(%esp), %ebx
+ movl 300(%esp), %edi
+ movl 296(%esp), %esi
+ movl 292(%esp), %edx
+ movl 284(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 288(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 104(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 24(%esp), %ebp # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 40(%edx), %eax
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 224(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 272(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 248(%esp), %ebx
+ movl 244(%esp), %edi
+ movl 240(%esp), %esi
+ movl 236(%esp), %edx
+ movl 228(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 232(%esp), %ecx
+ movl 864(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 32(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 40(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 44(%edx), %eax
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ movl 60(%esp), %esi # 4-byte Reload
+ addl 168(%esp), %esi
+ movl 220(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 208(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 204(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 200(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 196(%esp), %ebp
+ movl 192(%esp), %ebx
+ movl 188(%esp), %edi
+ movl 184(%esp), %edx
+ movl 180(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 176(%esp), %ecx
+ movl 864(%esp), %eax
+ movl %esi, 44(%eax)
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 48(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 104(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ movl 868(%esp), %edx
+ movl 48(%edx), %eax
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 108(%esp), %ebx # 4-byte Reload
+ calll .LmulPv416x32
+ addl 112(%esp), %esi
+ movl %esi, %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 120(%esp), %edi
+ movl 164(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 156(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 152(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 148(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 144(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 140(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 136(%esp), %ebx
+ movl 132(%esp), %esi
+ movl 128(%esp), %edx
+ movl 124(%esp), %ecx
+ movl 864(%esp), %eax
+ movl %ebp, 48(%eax)
+ movl 68(%esp), %ebp # 4-byte Reload
+ movl %ebp, 52(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 56(%eax)
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 60(%eax)
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 64(%eax)
+ adcl 104(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 68(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ebx, 72(%eax)
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 76(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 88(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 80(%eax)
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 84(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 100(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 88(%eax)
+ movl %ecx, 92(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 96(%eax)
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 100(%eax)
+ addl $844, %esp # imm = 0x34C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end193:
+ .size mcl_fpDbl_sqrPre13L, .Lfunc_end193-mcl_fpDbl_sqrPre13L
+
+ .globl mcl_fp_mont13L
+ .align 16, 0x90
+ .type mcl_fp_mont13L,@function
+mcl_fp_mont13L: # @mcl_fp_mont13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1548, %esp # imm = 0x60C
+ calll .L194$pb
+.L194$pb:
+ popl %ebx
+.Ltmp35:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp35-.L194$pb), %ebx
+ movl 1580(%esp), %eax
+ movl -4(%eax), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1488(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1488(%esp), %esi
+ movl 1492(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %esi, %eax
+ imull %edi, %eax
+ movl 1540(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1536(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1532(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1528(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1524(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1520(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1516(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1512(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 1508(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1504(%esp), %edi
+ movl 1500(%esp), %ebp
+ movl 1496(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1432(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 1432(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1440(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1444(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl 1448(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1452(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1456(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1460(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1464(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1472(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1476(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1480(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1484(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl 1576(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1376(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 1376(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1384(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1388(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1400(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 1404(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1408(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1412(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 1420(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1428(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1320(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %esi
+ movl %esi, %eax
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 1320(%esp), %ecx
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1324(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1328(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1332(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1336(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %esi # 4-byte Reload
+ adcl 1340(%esp), %esi
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1344(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ adcl 1348(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1352(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1356(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1360(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 1364(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1368(%esp), %ebp
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 1372(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1264(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 1264(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 1280(%esp), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 1300(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1308(%esp), %ebp
+ adcl 1312(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl 1580(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv416x32
+ movl 84(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1208(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1212(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1216(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1220(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 1224(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1228(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1232(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1236(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1240(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 1244(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 1248(%esp), %edi
+ adcl 1252(%esp), %ebp
+ movl %ebp, %esi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1256(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 1260(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1152(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 1152(%esp), %ecx
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1188(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 1192(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1200(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1204(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1096(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 1096(%esp), %esi
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1100(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1104(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 1108(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1112(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1116(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1120(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1124(%esp), %ebp
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 1128(%esp), %edi
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 1132(%esp), %esi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1136(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1140(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1144(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1148(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1040(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 1040(%esp), %ecx
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1044(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1064(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 1068(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 1072(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1080(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1084(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1088(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 984(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %edi
+ addl 984(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ adcl 996(%esp), %ebp
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1028(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl 1576(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 928(%esp), %ecx
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ adcl 936(%esp), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 944(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 980(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 872(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %edi
+ addl 872(%esp), %ebp
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ebp # 4-byte Reload
+ adcl 884(%esp), %ebp
+ adcl 888(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 904(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 816(%esp), %ecx
+ movl 1572(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv416x32
+ movl 32(%esp), %ecx # 4-byte Reload
+ addl 816(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 824(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 844(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 848(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 856(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %esi
+ movl %esi, %eax
+ movl 32(%esp), %ecx # 4-byte Reload
+ addl 760(%esp), %ecx
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 764(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 768(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 772(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 776(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 780(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 784(%esp), %esi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 788(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 792(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 796(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 800(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 804(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 808(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 812(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 36(%esp), %eax # 4-byte Reload
+ addl 704(%esp), %eax
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 708(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 712(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 716(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 720(%esp), %ebp
+ adcl 724(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 728(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 732(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 736(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 740(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 744(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 748(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 752(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 756(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %eax, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 648(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 648(%esp), %esi
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 652(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 656(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 660(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 664(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 668(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 672(%esp), %edi
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 676(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 680(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 684(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 688(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 696(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 592(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 600(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 612(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ adcl 616(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 620(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 536(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ movl 44(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 536(%esp), %esi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 544(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 560(%esp), %esi
+ adcl 564(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 572(%esp), %edi
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 576(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 480(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 480(%esp), %ecx
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 500(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 512(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 516(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 424(%esp), %esi
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 428(%esp), %ebp
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 432(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 444(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ addl 368(%esp), %ebp
+ adcl 372(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 376(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 384(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 392(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ movl 52(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 312(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 320(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 328(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 336(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 340(%esp), %edi
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 344(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 256(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 256(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 268(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 280(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 284(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 288(%esp), %edi
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 200(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 200(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 208(%esp), %ebp
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 212(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 232(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %edi # 4-byte Reload
+ adcl 236(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 144(%esp), %ecx
+ adcl 148(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl 152(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 156(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 164(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 168(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 176(%esp), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 28(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 88(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ andl $1, %edi
+ addl 88(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 92(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 96(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 100(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 104(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ebx # 4-byte Reload
+ adcl 112(%esp), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 40(%esp), %ebx # 4-byte Reload
+ adcl 116(%esp), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 32(%esp), %ebx # 4-byte Reload
+ adcl 120(%esp), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ adcl 124(%esp), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ebx # 4-byte Reload
+ adcl 128(%esp), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 56(%esp), %ebx # 4-byte Reload
+ adcl 132(%esp), %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl 136(%esp), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %ebx # 4-byte Reload
+ adcl 140(%esp), %ebx
+ movl %ebx, 68(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl 1580(%esp), %ebx
+ subl (%ebx), %eax
+ sbbl 4(%ebx), %ecx
+ sbbl 8(%ebx), %ebp
+ sbbl 12(%ebx), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ sbbl 16(%ebx), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ sbbl 20(%ebx), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ sbbl 24(%ebx), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ sbbl 28(%ebx), %edx
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 32(%ebx), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 36(%ebx), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 40(%ebx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ sbbl 44(%ebx), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ sbbl 48(%ebx), %esi
+ movl %esi, %ebx
+ sbbl $0, %edi
+ andl $1, %edi
+ jne .LBB194_2
+# BB#1:
+ movl %edx, 32(%esp) # 4-byte Spill
+.LBB194_2:
+ movl %edi, %edx
+ testb %dl, %dl
+ movl 80(%esp), %edx # 4-byte Reload
+ jne .LBB194_4
+# BB#3:
+ movl %eax, %edx
+.LBB194_4:
+ movl 1568(%esp), %eax
+ movl %edx, (%eax)
+ movl 64(%esp), %esi # 4-byte Reload
+ jne .LBB194_6
+# BB#5:
+ movl %ecx, %esi
+.LBB194_6:
+ movl %esi, 4(%eax)
+ jne .LBB194_8
+# BB#7:
+ movl %ebp, 76(%esp) # 4-byte Spill
+.LBB194_8:
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB194_10
+# BB#9:
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, 84(%esp) # 4-byte Spill
+.LBB194_10:
+ movl 84(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ jne .LBB194_12
+# BB#11:
+ movl 8(%esp), %ebp # 4-byte Reload
+.LBB194_12:
+ movl %ebp, 16(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ jne .LBB194_14
+# BB#13:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB194_14:
+ movl %ecx, 20(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ jne .LBB194_16
+# BB#15:
+ movl 16(%esp), %ecx # 4-byte Reload
+.LBB194_16:
+ movl %ecx, 24(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ jne .LBB194_18
+# BB#17:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB194_18:
+ movl %ecx, 32(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ jne .LBB194_20
+# BB#19:
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB194_20:
+ movl %ecx, 36(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB194_22
+# BB#21:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB194_22:
+ movl %ecx, 40(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ jne .LBB194_24
+# BB#23:
+ movl 72(%esp), %ecx # 4-byte Reload
+.LBB194_24:
+ movl %ecx, 44(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ jne .LBB194_26
+# BB#25:
+ movl %ebx, %ecx
+.LBB194_26:
+ movl %ecx, 48(%eax)
+ addl $1548, %esp # imm = 0x60C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end194:
+ .size mcl_fp_mont13L, .Lfunc_end194-mcl_fp_mont13L
+
+ .globl mcl_fp_montNF13L
+ .align 16, 0x90
+ .type mcl_fp_montNF13L,@function
+mcl_fp_montNF13L: # @mcl_fp_montNF13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1548, %esp # imm = 0x60C
+ calll .L195$pb
+.L195$pb:
+ popl %ebx
+.Ltmp36:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp36-.L195$pb), %ebx
+ movl 1580(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1488(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1488(%esp), %edi
+ movl 1492(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 1540(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 1536(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1532(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1528(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1524(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1520(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1516(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1512(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 1508(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1504(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1500(%esp), %esi
+ movl 1496(%esp), %ebp
+ movl %eax, (%esp)
+ leal 1432(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 1432(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1436(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1440(%esp), %ebp
+ adcl 1444(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1448(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1452(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1456(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1460(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1464(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 1472(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1476(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1480(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 1484(%esp), %edi
+ movl 1576(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1376(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1428(%esp), %ecx
+ movl 80(%esp), %edx # 4-byte Reload
+ addl 1376(%esp), %edx
+ adcl 1380(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1384(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1400(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1404(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1408(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1412(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1424(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1320(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 1320(%esp), %esi
+ adcl 1324(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1328(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ adcl 1340(%esp), %esi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 1360(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1264(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1316(%esp), %eax
+ addl 1264(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1268(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1272(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1276(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 1280(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ adcl 1284(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1288(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1296(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 1300(%esp), %edi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1304(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1308(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1312(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 1208(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1212(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1216(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ adcl 1228(%esp), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1244(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1248(%esp), %esi
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 1252(%esp), %edi
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1256(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1260(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1152(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1204(%esp), %eax
+ movl 64(%esp), %edx # 4-byte Reload
+ addl 1152(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1156(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1160(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1164(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1168(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1172(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1176(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1180(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1184(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 1188(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 1192(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ adcl 1196(%esp), %ebp
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1200(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1096(%esp), %ecx
+ movl 1580(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv416x32
+ addl 1096(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 1116(%esp), %esi
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 1120(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1140(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 1148(%esp), %ebp
+ movl 1576(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1040(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 1092(%esp), %eax
+ movl 40(%esp), %edx # 4-byte Reload
+ addl 1040(%esp), %edx
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1044(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 1048(%esp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1052(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 1056(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl 1060(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1064(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1068(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1072(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1076(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1080(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1084(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 1088(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl %eax, %esi
+ adcl $0, %esi
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 984(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 984(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 996(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 1008(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1036(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 980(%esp), %eax
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 928(%esp), %ecx
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 932(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 936(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 940(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 944(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 948(%esp), %ebp
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 952(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 956(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 960(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 964(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 968(%esp), %esi
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 972(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 976(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 872(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 872(%esp), %edi
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 876(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 892(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 912(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 816(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 868(%esp), %edx
+ addl 816(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 832(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 836(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 860(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 760(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 780(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 784(%esp), %esi
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 788(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 804(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 756(%esp), %eax
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 704(%esp), %ecx
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 708(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 712(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 716(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 720(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl 724(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 728(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 732(%esp), %esi
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 736(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 740(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 744(%esp), %ebp
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 748(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 752(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 648(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 648(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 676(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 688(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 696(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 592(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 644(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 592(%esp), %ecx
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 596(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 624(%esp), %ebp
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 636(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 536(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 536(%esp), %edi
+ adcl 540(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 556(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 564(%esp), %esi
+ adcl 568(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 572(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 480(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 532(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 480(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 496(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 504(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 508(%esp), %edi
+ adcl 512(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 528(%esp), %ebp
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 424(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edi, %esi
+ adcl 452(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 460(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 472(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 420(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 368(%esp), %ecx
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 372(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 392(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ adcl 400(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 404(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 312(%esp), %esi
+ adcl 316(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 320(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 348(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 256(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 308(%esp), %edx
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 256(%esp), %ecx
+ adcl 260(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 272(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 288(%esp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 200(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 200(%esp), %esi
+ adcl 204(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 212(%esp), %ebp
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 216(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 228(%esp), %edi
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1576(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 144(%esp), %ecx
+ movl 1572(%esp), %edx
+ calll .LmulPv416x32
+ movl 196(%esp), %edx
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 144(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 148(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 152(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl 156(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 164(%esp), %ebp
+ adcl 168(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 88(%esp), %ecx
+ movl 1580(%esp), %edx
+ calll .LmulPv416x32
+ addl 88(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 92(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 96(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 100(%esp), %edi
+ movl 64(%esp), %ebx # 4-byte Reload
+ adcl 104(%esp), %ebx
+ movl %ebx, 64(%esp) # 4-byte Spill
+ adcl 108(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 112(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 124(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 128(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 132(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 136(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 140(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 1580(%esp), %eax
+ subl (%eax), %edx
+ movl %ecx, %ebp
+ sbbl 4(%eax), %ebp
+ movl %edi, %ecx
+ sbbl 8(%eax), %ecx
+ sbbl 12(%eax), %ebx
+ sbbl 16(%eax), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 20(%eax), %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 36(%esp), %esi # 4-byte Reload
+ sbbl 24(%eax), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 28(%eax), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ sbbl 32(%eax), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 36(%eax), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ sbbl 40(%eax), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ sbbl 44(%eax), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ sbbl 48(%eax), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl %esi, %eax
+ sarl $31, %eax
+ testl %eax, %eax
+ movl 84(%esp), %eax # 4-byte Reload
+ js .LBB195_2
+# BB#1:
+ movl %edx, %eax
+.LBB195_2:
+ movl 1568(%esp), %edx
+ movl %eax, (%edx)
+ movl 80(%esp), %esi # 4-byte Reload
+ js .LBB195_4
+# BB#3:
+ movl %ebp, %esi
+.LBB195_4:
+ movl %esi, 4(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB195_6
+# BB#5:
+ movl %ecx, %edi
+.LBB195_6:
+ movl %edi, 8(%edx)
+ js .LBB195_8
+# BB#7:
+ movl %ebx, %eax
+.LBB195_8:
+ movl %eax, 12(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB195_10
+# BB#9:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB195_10:
+ movl %eax, 16(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB195_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB195_12:
+ movl %eax, 20(%edx)
+ movl 36(%esp), %eax # 4-byte Reload
+ js .LBB195_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB195_14:
+ movl %eax, 24(%edx)
+ movl 44(%esp), %eax # 4-byte Reload
+ js .LBB195_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB195_16:
+ movl %eax, 28(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB195_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB195_18:
+ movl %eax, 32(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB195_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB195_20:
+ movl %eax, 36(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB195_22
+# BB#21:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB195_22:
+ movl %eax, 40(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB195_24
+# BB#23:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB195_24:
+ movl %eax, 44(%edx)
+ movl 76(%esp), %eax # 4-byte Reload
+ js .LBB195_26
+# BB#25:
+ movl 68(%esp), %eax # 4-byte Reload
+.LBB195_26:
+ movl %eax, 48(%edx)
+ addl $1548, %esp # imm = 0x60C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end195:
+ .size mcl_fp_montNF13L, .Lfunc_end195-mcl_fp_montNF13L
+
+# ---------------------------------------------------------------------------
+# mcl_fp_montRed13L -- Montgomery reduction for 13 x 32-bit limbs (416 bits).
+# i386, PIC (loads GOT to reach the .LmulPv416x32 helper).
+# Stack arguments, as addressed after the 4 pushes + 892-byte frame:
+#   912(%esp) = output pointer z (13 limbs stored at the end)
+#   916(%esp) = double-width input xy (limbs read at 0..100 off the pointer)
+#   920(%esp) = modulus p (limbs at 0..48); -4(p) holds the precomputed
+#               constant rp = -p^-1 mod 2^32 (cached in 72(%esp)) --
+#               presumably mcl's usual layout; confirm against the C side.
+# Algorithm: 13 rounds of  q = limb0 * rp (mod 2^32);  acc += q*p  (via
+# .LmulPv416x32, which writes a 14-limb product at the leal'd buffer),
+# dropping one zeroed low limb per round; then one conditional
+# subtraction of p selected by the final borrow.
+# NOTE: EFLAGS (CF) is live across every adcl/sbbl chain -- do not reorder.
+# ---------------------------------------------------------------------------
+	.globl	mcl_fp_montRed13L
+	.align	16, 0x90
+	.type	mcl_fp_montRed13L,@function
+mcl_fp_montRed13L:                      # @mcl_fp_montRed13L
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$892, %esp              # imm = 0x37C
+# PIC setup: materialize the GOT pointer (needed by .LmulPv416x32 in %ebx).
+	calll	.L196$pb
+.L196$pb:
+	popl	%eax
+.Ltmp37:
+	addl	$_GLOBAL_OFFSET_TABLE_+(.Ltmp37-.L196$pb), %eax
+	movl	%eax, 84(%esp)          # 4-byte Spill
+	movl	920(%esp), %edx
+	movl	-4(%edx), %eax          # rp = -p^-1 mod 2^32
+	movl	%eax, 72(%esp)          # 4-byte Spill
+	movl	916(%esp), %ecx
+	movl	(%ecx), %ebx
+	movl	%ebx, 76(%esp)          # 4-byte Spill
+	movl	4(%ecx), %edi
+	movl	%edi, 80(%esp)          # 4-byte Spill
+	imull	%eax, %ebx              # q0 = xy[0] * rp (mod 2^32)
+# Load the remaining 24 input limbs xy[2..25] into spill slots.
+	movl	100(%ecx), %esi
+	movl	%esi, 92(%esp)          # 4-byte Spill
+	movl	96(%ecx), %eax
+	movl	%eax, 100(%esp)         # 4-byte Spill
+	movl	92(%ecx), %eax
+	movl	%eax, 96(%esp)          # 4-byte Spill
+	movl	88(%ecx), %eax
+	movl	%eax, 112(%esp)         # 4-byte Spill
+	movl	84(%ecx), %esi
+	movl	%esi, 116(%esp)         # 4-byte Spill
+	movl	80(%ecx), %esi
+	movl	%esi, 136(%esp)         # 4-byte Spill
+	movl	76(%ecx), %esi
+	movl	%esi, 144(%esp)         # 4-byte Spill
+	movl	72(%ecx), %eax
+	movl	%eax, 132(%esp)         # 4-byte Spill
+	movl	68(%ecx), %esi
+	movl	%esi, 128(%esp)         # 4-byte Spill
+	movl	64(%ecx), %esi
+	movl	%esi, 148(%esp)         # 4-byte Spill
+	movl	60(%ecx), %esi
+	movl	%esi, 152(%esp)         # 4-byte Spill
+	movl	56(%ecx), %esi
+	movl	%esi, 140(%esp)         # 4-byte Spill
+	movl	52(%ecx), %esi
+	movl	%esi, 156(%esp)         # 4-byte Spill
+	movl	48(%ecx), %eax
+	movl	%eax, 120(%esp)         # 4-byte Spill
+	movl	44(%ecx), %ebp
+	movl	%ebp, 124(%esp)         # 4-byte Spill
+	movl	40(%ecx), %ebp
+	movl	%ebp, 108(%esp)         # 4-byte Spill
+	movl	36(%ecx), %ebp
+	movl	%ebp, 104(%esp)         # 4-byte Spill
+	movl	32(%ecx), %eax
+	movl	%eax, 88(%esp)          # 4-byte Spill
+	movl	28(%ecx), %ebp
+	movl	24(%ecx), %edi
+	movl	20(%ecx), %eax
+	movl	%eax, 64(%esp)          # 4-byte Spill
+	movl	16(%ecx), %eax
+	movl	%eax, 60(%esp)          # 4-byte Spill
+	movl	12(%ecx), %eax
+	movl	%eax, 56(%esp)          # 4-byte Spill
+	movl	8(%ecx), %esi
+# Cache the 13 modulus limbs p[0..12] at 4..52(%esp) for the final subtract.
+	movl	(%edx), %ecx
+	movl	%ecx, 12(%esp)          # 4-byte Spill
+	movl	48(%edx), %ecx
+	movl	%ecx, 52(%esp)          # 4-byte Spill
+	movl	44(%edx), %ecx
+	movl	%ecx, 48(%esp)          # 4-byte Spill
+	movl	40(%edx), %ecx
+	movl	%ecx, 44(%esp)          # 4-byte Spill
+	movl	36(%edx), %ecx
+	movl	%ecx, 40(%esp)          # 4-byte Spill
+	movl	32(%edx), %ecx
+	movl	%ecx, 36(%esp)          # 4-byte Spill
+	movl	28(%edx), %ecx
+	movl	%ecx, 32(%esp)          # 4-byte Spill
+	movl	24(%edx), %ecx
+	movl	%ecx, 28(%esp)          # 4-byte Spill
+	movl	20(%edx), %ecx
+	movl	%ecx, 24(%esp)          # 4-byte Spill
+	movl	16(%edx), %ecx
+	movl	%ecx, 20(%esp)          # 4-byte Spill
+	movl	12(%edx), %ecx
+	movl	%ecx, 16(%esp)          # 4-byte Spill
+	movl	8(%edx), %ecx
+	movl	%ecx, 8(%esp)           # 4-byte Spill
+	movl	4(%edx), %ecx
+	movl	%ecx, 4(%esp)           # 4-byte Spill
+# Round 1: q0 (in %ebx) is the helper argument; product q0*p -> 832(%esp).
+	movl	%ebx, (%esp)
+	leal	832(%esp), %ecx
+	movl	84(%esp), %ebx          # 4-byte Reload
+	calll	.LmulPv416x32
+# Fold q0*p into the accumulator; limb 0 becomes zero and is dropped.
+	movl	76(%esp), %eax          # 4-byte Reload
+	addl	832(%esp), %eax
+	movl	80(%esp), %ecx          # 4-byte Reload
+	adcl	836(%esp), %ecx
+	adcl	840(%esp), %esi
+	movl	%esi, 80(%esp)          # 4-byte Spill
+	movl	56(%esp), %eax          # 4-byte Reload
+	adcl	844(%esp), %eax
+	movl	%eax, 56(%esp)          # 4-byte Spill
+	movl	60(%esp), %eax          # 4-byte Reload
+	adcl	848(%esp), %eax
+	movl	%eax, 60(%esp)          # 4-byte Spill
+	movl	64(%esp), %eax          # 4-byte Reload
+	adcl	852(%esp), %eax
+	movl	%eax, 64(%esp)          # 4-byte Spill
+	adcl	856(%esp), %edi
+	movl	%edi, 68(%esp)          # 4-byte Spill
+	adcl	860(%esp), %ebp
+	movl	%ebp, 76(%esp)          # 4-byte Spill
+	movl	88(%esp), %eax          # 4-byte Reload
+	adcl	864(%esp), %eax
+	movl	%eax, 88(%esp)          # 4-byte Spill
+	movl	104(%esp), %eax         # 4-byte Reload
+	adcl	868(%esp), %eax
+	movl	%eax, 104(%esp)         # 4-byte Spill
+	movl	108(%esp), %eax         # 4-byte Reload
+	adcl	872(%esp), %eax
+	movl	%eax, 108(%esp)         # 4-byte Spill
+	movl	124(%esp), %eax         # 4-byte Reload
+	adcl	876(%esp), %eax
+	movl	%eax, 124(%esp)         # 4-byte Spill
+	movl	120(%esp), %eax         # 4-byte Reload
+	adcl	880(%esp), %eax
+	movl	%eax, 120(%esp)         # 4-byte Spill
+	movl	156(%esp), %eax         # 4-byte Reload
+	adcl	884(%esp), %eax
+	movl	%eax, 156(%esp)         # 4-byte Spill
+# Propagate the carry through the untouched upper limbs.
+	adcl	$0, 140(%esp)           # 4-byte Folded Spill
+	adcl	$0, 152(%esp)           # 4-byte Folded Spill
+	adcl	$0, 148(%esp)           # 4-byte Folded Spill
+	adcl	$0, 128(%esp)           # 4-byte Folded Spill
+	adcl	$0, 132(%esp)           # 4-byte Folded Spill
+	adcl	$0, 144(%esp)           # 4-byte Folded Spill
+	adcl	$0, 136(%esp)           # 4-byte Folded Spill
+	adcl	$0, 116(%esp)           # 4-byte Folded Spill
+	movl	112(%esp), %ebp         # 4-byte Reload
+	adcl	$0, %ebp
+	adcl	$0, 96(%esp)            # 4-byte Folded Spill
+	adcl	$0, 100(%esp)           # 4-byte Folded Spill
+	adcl	$0, 92(%esp)            # 4-byte Folded Spill
+	sbbl	%esi, %esi              # %esi = -(top carry); masked to 1 below
+# Round 2: q = limb*rp; product buffer at 776(%esp).
+	movl	%ecx, %eax
+	movl	%ecx, %edi
+	imull	72(%esp), %eax          # 4-byte Folded Reload
+	movl	%eax, (%esp)
+	leal	776(%esp), %ecx
+	movl	920(%esp), %edx
+	movl	84(%esp), %ebx          # 4-byte Reload
+	calll	.LmulPv416x32
+	andl	$1, %esi
+	addl	776(%esp), %edi
+	movl	80(%esp), %ecx          # 4-byte Reload
+	adcl	780(%esp), %ecx
+	movl	56(%esp), %eax          # 4-byte Reload
+	adcl	784(%esp), %eax
+	movl	%eax, 56(%esp)          # 4-byte Spill
+	movl	60(%esp), %eax          # 4-byte Reload
+	adcl	788(%esp), %eax
+	movl	%eax, 60(%esp)          # 4-byte Spill
+	movl	64(%esp), %eax          # 4-byte Reload
+	adcl	792(%esp), %eax
+	movl	%eax, 64(%esp)          # 4-byte Spill
+	movl	68(%esp), %eax          # 4-byte Reload
+	adcl	796(%esp), %eax
+	movl	%eax, 68(%esp)          # 4-byte Spill
+	movl	76(%esp), %eax          # 4-byte Reload
+	adcl	800(%esp), %eax
+	movl	%eax, 76(%esp)          # 4-byte Spill
+	movl	88(%esp), %eax          # 4-byte Reload
+	adcl	804(%esp), %eax
+	movl	%eax, 88(%esp)          # 4-byte Spill
+	movl	104(%esp), %eax         # 4-byte Reload
+	adcl	808(%esp), %eax
+	movl	%eax, 104(%esp)         # 4-byte Spill
+	movl	108(%esp), %eax         # 4-byte Reload
+	adcl	812(%esp), %eax
+	movl	%eax, 108(%esp)         # 4-byte Spill
+	movl	124(%esp), %eax         # 4-byte Reload
+	adcl	816(%esp), %eax
+	movl	%eax, 124(%esp)         # 4-byte Spill
+	movl	120(%esp), %eax         # 4-byte Reload
+	adcl	820(%esp), %eax
+	movl	%eax, 120(%esp)         # 4-byte Spill
+	movl	156(%esp), %eax         # 4-byte Reload
+	adcl	824(%esp), %eax
+	movl	%eax, 156(%esp)         # 4-byte Spill
+	movl	140(%esp), %eax         # 4-byte Reload
+	adcl	828(%esp), %eax
+	movl	%eax, 140(%esp)         # 4-byte Spill
+	adcl	$0, 152(%esp)           # 4-byte Folded Spill
+	adcl	$0, 148(%esp)           # 4-byte Folded Spill
+	adcl	$0, 128(%esp)           # 4-byte Folded Spill
+	movl	132(%esp), %eax         # 4-byte Reload
+	adcl	$0, %eax
+	movl	%eax, %edi
+	adcl	$0, 144(%esp)           # 4-byte Folded Spill
+	adcl	$0, 136(%esp)           # 4-byte Folded Spill
+	adcl	$0, 116(%esp)           # 4-byte Folded Spill
+	adcl	$0, %ebp
+	movl	%ebp, 112(%esp)         # 4-byte Spill
+	movl	96(%esp), %ebp          # 4-byte Reload
+	adcl	$0, %ebp
+	adcl	$0, 100(%esp)           # 4-byte Folded Spill
+	adcl	$0, 92(%esp)            # 4-byte Folded Spill
+	adcl	$0, %esi
+	movl	%esi, 80(%esp)          # 4-byte Spill
+# Round 3: product buffer at 720(%esp).
+	movl	%ecx, %esi
+	movl	%esi, %eax
+	imull	72(%esp), %eax          # 4-byte Folded Reload
+	movl	%eax, (%esp)
+	leal	720(%esp), %ecx
+	movl	920(%esp), %edx
+	movl	84(%esp), %ebx          # 4-byte Reload
+	calll	.LmulPv416x32
+	addl	720(%esp), %esi
+	movl	56(%esp), %esi          # 4-byte Reload
+	adcl	724(%esp), %esi
+	movl	60(%esp), %eax          # 4-byte Reload
+	adcl	728(%esp), %eax
+	movl	%eax, 60(%esp)          # 4-byte Spill
+	movl	64(%esp), %eax          # 4-byte Reload
+	adcl	732(%esp), %eax
+	movl	%eax, 64(%esp)          # 4-byte Spill
+	movl	68(%esp), %eax          # 4-byte Reload
+	adcl	736(%esp), %eax
+	movl	%eax, 68(%esp)          # 4-byte Spill
+	movl	76(%esp), %eax          # 4-byte Reload
+	adcl	740(%esp), %eax
+	movl	%eax, 76(%esp)          # 4-byte Spill
+	movl	88(%esp), %eax          # 4-byte Reload
+	adcl	744(%esp), %eax
+	movl	%eax, 88(%esp)          # 4-byte Spill
+	movl	104(%esp), %eax         # 4-byte Reload
+	adcl	748(%esp), %eax
+	movl	%eax, 104(%esp)         # 4-byte Spill
+	movl	108(%esp), %eax         # 4-byte Reload
+	adcl	752(%esp), %eax
+	movl	%eax, 108(%esp)         # 4-byte Spill
+	movl	124(%esp), %eax         # 4-byte Reload
+	adcl	756(%esp), %eax
+	movl	%eax, 124(%esp)         # 4-byte Spill
+	movl	120(%esp), %eax         # 4-byte Reload
+	adcl	760(%esp), %eax
+	movl	%eax, 120(%esp)         # 4-byte Spill
+	movl	156(%esp), %eax         # 4-byte Reload
+	adcl	764(%esp), %eax
+	movl	%eax, 156(%esp)         # 4-byte Spill
+	movl	140(%esp), %eax         # 4-byte Reload
+	adcl	768(%esp), %eax
+	movl	%eax, 140(%esp)         # 4-byte Spill
+	movl	152(%esp), %eax         # 4-byte Reload
+	adcl	772(%esp), %eax
+	movl	%eax, 152(%esp)         # 4-byte Spill
+	adcl	$0, 148(%esp)           # 4-byte Folded Spill
+	adcl	$0, 128(%esp)           # 4-byte Folded Spill
+	adcl	$0, %edi
+	movl	%edi, 132(%esp)         # 4-byte Spill
+	adcl	$0, 144(%esp)           # 4-byte Folded Spill
+	adcl	$0, 136(%esp)           # 4-byte Folded Spill
+	adcl	$0, 116(%esp)           # 4-byte Folded Spill
+	adcl	$0, 112(%esp)           # 4-byte Folded Spill
+	adcl	$0, %ebp
+	movl	100(%esp), %edi         # 4-byte Reload
+	adcl	$0, %edi
+	adcl	$0, 92(%esp)            # 4-byte Folded Spill
+	adcl	$0, 80(%esp)            # 4-byte Folded Spill
+# Round 4: product buffer at 664(%esp).
+	movl	%esi, %eax
+	imull	72(%esp), %eax          # 4-byte Folded Reload
+	movl	%eax, (%esp)
+	leal	664(%esp), %ecx
+	movl	920(%esp), %edx
+	movl	84(%esp), %ebx          # 4-byte Reload
+	calll	.LmulPv416x32
+	addl	664(%esp), %esi
+	movl	60(%esp), %ecx          # 4-byte Reload
+	adcl	668(%esp), %ecx
+	movl	64(%esp), %eax          # 4-byte Reload
+	adcl	672(%esp), %eax
+	movl	%eax, 64(%esp)          # 4-byte Spill
+	movl	68(%esp), %eax          # 4-byte Reload
+	adcl	676(%esp), %eax
+	movl	%eax, 68(%esp)          # 4-byte Spill
+	movl	76(%esp), %eax          # 4-byte Reload
+	adcl	680(%esp), %eax
+	movl	%eax, 76(%esp)          # 4-byte Spill
+	movl	88(%esp), %eax          # 4-byte Reload
+	adcl	684(%esp), %eax
+	movl	%eax, 88(%esp)          # 4-byte Spill
+	movl	104(%esp), %eax         # 4-byte Reload
+	adcl	688(%esp), %eax
+	movl	%eax, 104(%esp)         # 4-byte Spill
+	movl	108(%esp), %eax         # 4-byte Reload
+	adcl	692(%esp), %eax
+	movl	%eax, 108(%esp)         # 4-byte Spill
+	movl	124(%esp), %eax         # 4-byte Reload
+	adcl	696(%esp), %eax
+	movl	%eax, 124(%esp)         # 4-byte Spill
+	movl	120(%esp), %eax         # 4-byte Reload
+	adcl	700(%esp), %eax
+	movl	%eax, 120(%esp)         # 4-byte Spill
+	movl	156(%esp), %eax         # 4-byte Reload
+	adcl	704(%esp), %eax
+	movl	%eax, 156(%esp)         # 4-byte Spill
+	movl	140(%esp), %eax         # 4-byte Reload
+	adcl	708(%esp), %eax
+	movl	%eax, 140(%esp)         # 4-byte Spill
+	movl	152(%esp), %eax         # 4-byte Reload
+	adcl	712(%esp), %eax
+	movl	%eax, 152(%esp)         # 4-byte Spill
+	movl	148(%esp), %eax         # 4-byte Reload
+	adcl	716(%esp), %eax
+	movl	%eax, 148(%esp)         # 4-byte Spill
+	adcl	$0, 128(%esp)           # 4-byte Folded Spill
+	adcl	$0, 132(%esp)           # 4-byte Folded Spill
+	adcl	$0, 144(%esp)           # 4-byte Folded Spill
+	adcl	$0, 136(%esp)           # 4-byte Folded Spill
+	adcl	$0, 116(%esp)           # 4-byte Folded Spill
+	adcl	$0, 112(%esp)           # 4-byte Folded Spill
+	adcl	$0, %ebp
+	movl	%ebp, 96(%esp)          # 4-byte Spill
+	adcl	$0, %edi
+	movl	%edi, 100(%esp)         # 4-byte Spill
+	movl	92(%esp), %esi          # 4-byte Reload
+	adcl	$0, %esi
+	adcl	$0, 80(%esp)            # 4-byte Folded Spill
+# Round 5: product buffer at 608(%esp).
+	movl	%ecx, %edi
+	movl	%edi, %eax
+	imull	72(%esp), %eax          # 4-byte Folded Reload
+	movl	%eax, (%esp)
+	leal	608(%esp), %ecx
+	movl	920(%esp), %edx
+	movl	84(%esp), %ebx          # 4-byte Reload
+	calll	.LmulPv416x32
+	addl	608(%esp), %edi
+	movl	64(%esp), %ecx          # 4-byte Reload
+	adcl	612(%esp), %ecx
+	movl	68(%esp), %eax          # 4-byte Reload
+	adcl	616(%esp), %eax
+	movl	%eax, 68(%esp)          # 4-byte Spill
+	movl	76(%esp), %eax          # 4-byte Reload
+	adcl	620(%esp), %eax
+	movl	%eax, 76(%esp)          # 4-byte Spill
+	movl	88(%esp), %eax          # 4-byte Reload
+	adcl	624(%esp), %eax
+	movl	%eax, 88(%esp)          # 4-byte Spill
+	movl	104(%esp), %eax         # 4-byte Reload
+	adcl	628(%esp), %eax
+	movl	%eax, 104(%esp)         # 4-byte Spill
+	movl	108(%esp), %eax         # 4-byte Reload
+	adcl	632(%esp), %eax
+	movl	%eax, 108(%esp)         # 4-byte Spill
+	movl	124(%esp), %eax         # 4-byte Reload
+	adcl	636(%esp), %eax
+	movl	%eax, 124(%esp)         # 4-byte Spill
+	movl	120(%esp), %eax         # 4-byte Reload
+	adcl	640(%esp), %eax
+	movl	%eax, 120(%esp)         # 4-byte Spill
+	movl	156(%esp), %eax         # 4-byte Reload
+	adcl	644(%esp), %eax
+	movl	%eax, 156(%esp)         # 4-byte Spill
+	movl	140(%esp), %eax         # 4-byte Reload
+	adcl	648(%esp), %eax
+	movl	%eax, 140(%esp)         # 4-byte Spill
+	movl	152(%esp), %eax         # 4-byte Reload
+	adcl	652(%esp), %eax
+	movl	%eax, 152(%esp)         # 4-byte Spill
+	movl	148(%esp), %eax         # 4-byte Reload
+	adcl	656(%esp), %eax
+	movl	%eax, 148(%esp)         # 4-byte Spill
+	movl	128(%esp), %eax         # 4-byte Reload
+	adcl	660(%esp), %eax
+	movl	%eax, 128(%esp)         # 4-byte Spill
+	adcl	$0, 132(%esp)           # 4-byte Folded Spill
+	movl	144(%esp), %edi         # 4-byte Reload
+	adcl	$0, %edi
+	adcl	$0, 136(%esp)           # 4-byte Folded Spill
+	adcl	$0, 116(%esp)           # 4-byte Folded Spill
+	movl	112(%esp), %ebp         # 4-byte Reload
+	adcl	$0, %ebp
+	adcl	$0, 96(%esp)            # 4-byte Folded Spill
+	adcl	$0, 100(%esp)           # 4-byte Folded Spill
+	adcl	$0, %esi
+	movl	%esi, 92(%esp)          # 4-byte Spill
+	adcl	$0, 80(%esp)            # 4-byte Folded Spill
+# Round 6: product buffer at 552(%esp).
+	movl	%ecx, %esi
+	movl	%esi, %eax
+	imull	72(%esp), %eax          # 4-byte Folded Reload
+	movl	%eax, (%esp)
+	leal	552(%esp), %ecx
+	movl	920(%esp), %edx
+	movl	84(%esp), %ebx          # 4-byte Reload
+	calll	.LmulPv416x32
+	addl	552(%esp), %esi
+	movl	68(%esp), %ecx          # 4-byte Reload
+	adcl	556(%esp), %ecx
+	movl	76(%esp), %eax          # 4-byte Reload
+	adcl	560(%esp), %eax
+	movl	%eax, 76(%esp)          # 4-byte Spill
+	movl	88(%esp), %eax          # 4-byte Reload
+	adcl	564(%esp), %eax
+	movl	%eax, 88(%esp)          # 4-byte Spill
+	movl	104(%esp), %eax         # 4-byte Reload
+	adcl	568(%esp), %eax
+	movl	%eax, 104(%esp)         # 4-byte Spill
+	movl	108(%esp), %eax         # 4-byte Reload
+	adcl	572(%esp), %eax
+	movl	%eax, 108(%esp)         # 4-byte Spill
+	movl	124(%esp), %eax         # 4-byte Reload
+	adcl	576(%esp), %eax
+	movl	%eax, 124(%esp)         # 4-byte Spill
+	movl	120(%esp), %eax         # 4-byte Reload
+	adcl	580(%esp), %eax
+	movl	%eax, 120(%esp)         # 4-byte Spill
+	movl	156(%esp), %eax         # 4-byte Reload
+	adcl	584(%esp), %eax
+	movl	%eax, 156(%esp)         # 4-byte Spill
+	movl	140(%esp), %eax         # 4-byte Reload
+	adcl	588(%esp), %eax
+	movl	%eax, 140(%esp)         # 4-byte Spill
+	movl	152(%esp), %eax         # 4-byte Reload
+	adcl	592(%esp), %eax
+	movl	%eax, 152(%esp)         # 4-byte Spill
+	movl	148(%esp), %eax         # 4-byte Reload
+	adcl	596(%esp), %eax
+	movl	%eax, 148(%esp)         # 4-byte Spill
+	movl	128(%esp), %eax         # 4-byte Reload
+	adcl	600(%esp), %eax
+	movl	%eax, 128(%esp)         # 4-byte Spill
+	movl	132(%esp), %eax         # 4-byte Reload
+	adcl	604(%esp), %eax
+	movl	%eax, 132(%esp)         # 4-byte Spill
+	adcl	$0, %edi
+	movl	%edi, 144(%esp)         # 4-byte Spill
+	movl	136(%esp), %esi         # 4-byte Reload
+	adcl	$0, %esi
+	adcl	$0, 116(%esp)           # 4-byte Folded Spill
+	adcl	$0, %ebp
+	movl	%ebp, 112(%esp)         # 4-byte Spill
+	adcl	$0, 96(%esp)            # 4-byte Folded Spill
+	adcl	$0, 100(%esp)           # 4-byte Folded Spill
+	adcl	$0, 92(%esp)            # 4-byte Folded Spill
+	adcl	$0, 80(%esp)            # 4-byte Folded Spill
+# Round 7: product buffer at 496(%esp).
+	movl	%ecx, %edi
+	movl	%edi, %eax
+	imull	72(%esp), %eax          # 4-byte Folded Reload
+	movl	%eax, (%esp)
+	leal	496(%esp), %ecx
+	movl	920(%esp), %edx
+	movl	84(%esp), %ebx          # 4-byte Reload
+	calll	.LmulPv416x32
+	addl	496(%esp), %edi
+	movl	76(%esp), %ecx          # 4-byte Reload
+	adcl	500(%esp), %ecx
+	movl	88(%esp), %eax          # 4-byte Reload
+	adcl	504(%esp), %eax
+	movl	%eax, 88(%esp)          # 4-byte Spill
+	movl	104(%esp), %eax         # 4-byte Reload
+	adcl	508(%esp), %eax
+	movl	%eax, 104(%esp)         # 4-byte Spill
+	movl	108(%esp), %eax         # 4-byte Reload
+	adcl	512(%esp), %eax
+	movl	%eax, 108(%esp)         # 4-byte Spill
+	movl	124(%esp), %eax         # 4-byte Reload
+	adcl	516(%esp), %eax
+	movl	%eax, 124(%esp)         # 4-byte Spill
+	movl	120(%esp), %eax         # 4-byte Reload
+	adcl	520(%esp), %eax
+	movl	%eax, 120(%esp)         # 4-byte Spill
+	movl	156(%esp), %eax         # 4-byte Reload
+	adcl	524(%esp), %eax
+	movl	%eax, 156(%esp)         # 4-byte Spill
+	movl	140(%esp), %eax         # 4-byte Reload
+	adcl	528(%esp), %eax
+	movl	%eax, 140(%esp)         # 4-byte Spill
+	movl	152(%esp), %ebp         # 4-byte Reload
+	adcl	532(%esp), %ebp
+	movl	148(%esp), %edi         # 4-byte Reload
+	adcl	536(%esp), %edi
+	movl	128(%esp), %eax         # 4-byte Reload
+	adcl	540(%esp), %eax
+	movl	%eax, 128(%esp)         # 4-byte Spill
+	movl	132(%esp), %eax         # 4-byte Reload
+	adcl	544(%esp), %eax
+	movl	%eax, 132(%esp)         # 4-byte Spill
+	movl	144(%esp), %eax         # 4-byte Reload
+	adcl	548(%esp), %eax
+	movl	%eax, 144(%esp)         # 4-byte Spill
+	adcl	$0, %esi
+	movl	%esi, 136(%esp)         # 4-byte Spill
+	adcl	$0, 116(%esp)           # 4-byte Folded Spill
+	adcl	$0, 112(%esp)           # 4-byte Folded Spill
+	adcl	$0, 96(%esp)            # 4-byte Folded Spill
+	adcl	$0, 100(%esp)           # 4-byte Folded Spill
+	adcl	$0, 92(%esp)            # 4-byte Folded Spill
+	adcl	$0, 80(%esp)            # 4-byte Folded Spill
+# Round 8: product buffer at 440(%esp).
+	movl	%ecx, %eax
+	movl	%ecx, %esi
+	imull	72(%esp), %eax          # 4-byte Folded Reload
+	movl	%eax, (%esp)
+	leal	440(%esp), %ecx
+	movl	920(%esp), %edx
+	movl	84(%esp), %ebx          # 4-byte Reload
+	calll	.LmulPv416x32
+	addl	440(%esp), %esi
+	movl	88(%esp), %ecx          # 4-byte Reload
+	adcl	444(%esp), %ecx
+	movl	104(%esp), %eax         # 4-byte Reload
+	adcl	448(%esp), %eax
+	movl	%eax, 104(%esp)         # 4-byte Spill
+	movl	108(%esp), %eax         # 4-byte Reload
+	adcl	452(%esp), %eax
+	movl	%eax, 108(%esp)         # 4-byte Spill
+	movl	124(%esp), %eax         # 4-byte Reload
+	adcl	456(%esp), %eax
+	movl	%eax, 124(%esp)         # 4-byte Spill
+	movl	120(%esp), %eax         # 4-byte Reload
+	adcl	460(%esp), %eax
+	movl	%eax, 120(%esp)         # 4-byte Spill
+	movl	156(%esp), %eax         # 4-byte Reload
+	adcl	464(%esp), %eax
+	movl	%eax, 156(%esp)         # 4-byte Spill
+	movl	140(%esp), %eax         # 4-byte Reload
+	adcl	468(%esp), %eax
+	movl	%eax, 140(%esp)         # 4-byte Spill
+	adcl	472(%esp), %ebp
+	movl	%ebp, 152(%esp)         # 4-byte Spill
+	adcl	476(%esp), %edi
+	movl	%edi, 148(%esp)         # 4-byte Spill
+	movl	128(%esp), %eax         # 4-byte Reload
+	adcl	480(%esp), %eax
+	movl	%eax, 128(%esp)         # 4-byte Spill
+	movl	132(%esp), %eax         # 4-byte Reload
+	adcl	484(%esp), %eax
+	movl	%eax, 132(%esp)         # 4-byte Spill
+	movl	144(%esp), %eax         # 4-byte Reload
+	adcl	488(%esp), %eax
+	movl	%eax, 144(%esp)         # 4-byte Spill
+	movl	136(%esp), %eax         # 4-byte Reload
+	adcl	492(%esp), %eax
+	movl	%eax, 136(%esp)         # 4-byte Spill
+	adcl	$0, 116(%esp)           # 4-byte Folded Spill
+	adcl	$0, 112(%esp)           # 4-byte Folded Spill
+	adcl	$0, 96(%esp)            # 4-byte Folded Spill
+	adcl	$0, 100(%esp)           # 4-byte Folded Spill
+	adcl	$0, 92(%esp)            # 4-byte Folded Spill
+	adcl	$0, 80(%esp)            # 4-byte Folded Spill
+# Round 9: product buffer at 384(%esp).
+	movl	%ecx, %esi
+	movl	%esi, %eax
+	imull	72(%esp), %eax          # 4-byte Folded Reload
+	movl	%eax, (%esp)
+	leal	384(%esp), %ecx
+	movl	920(%esp), %edx
+	movl	84(%esp), %ebx          # 4-byte Reload
+	calll	.LmulPv416x32
+	addl	384(%esp), %esi
+	movl	104(%esp), %ecx         # 4-byte Reload
+	adcl	388(%esp), %ecx
+	movl	%ecx, 104(%esp)         # 4-byte Spill
+	movl	108(%esp), %eax         # 4-byte Reload
+	adcl	392(%esp), %eax
+	movl	%eax, 108(%esp)         # 4-byte Spill
+	movl	124(%esp), %eax         # 4-byte Reload
+	adcl	396(%esp), %eax
+	movl	%eax, 124(%esp)         # 4-byte Spill
+	movl	120(%esp), %eax         # 4-byte Reload
+	adcl	400(%esp), %eax
+	movl	%eax, 120(%esp)         # 4-byte Spill
+	movl	156(%esp), %ebp         # 4-byte Reload
+	adcl	404(%esp), %ebp
+	movl	140(%esp), %edi         # 4-byte Reload
+	adcl	408(%esp), %edi
+	movl	152(%esp), %eax         # 4-byte Reload
+	adcl	412(%esp), %eax
+	movl	%eax, 152(%esp)         # 4-byte Spill
+	movl	148(%esp), %eax         # 4-byte Reload
+	adcl	416(%esp), %eax
+	movl	%eax, 148(%esp)         # 4-byte Spill
+	movl	128(%esp), %esi         # 4-byte Reload
+	adcl	420(%esp), %esi
+	movl	132(%esp), %eax         # 4-byte Reload
+	adcl	424(%esp), %eax
+	movl	%eax, 132(%esp)         # 4-byte Spill
+	movl	144(%esp), %eax         # 4-byte Reload
+	adcl	428(%esp), %eax
+	movl	%eax, 144(%esp)         # 4-byte Spill
+	movl	136(%esp), %eax         # 4-byte Reload
+	adcl	432(%esp), %eax
+	movl	%eax, 136(%esp)         # 4-byte Spill
+	movl	116(%esp), %eax         # 4-byte Reload
+	adcl	436(%esp), %eax
+	movl	%eax, 116(%esp)         # 4-byte Spill
+	adcl	$0, 112(%esp)           # 4-byte Folded Spill
+	adcl	$0, 96(%esp)            # 4-byte Folded Spill
+	adcl	$0, 100(%esp)           # 4-byte Folded Spill
+	adcl	$0, 92(%esp)            # 4-byte Folded Spill
+	adcl	$0, 80(%esp)            # 4-byte Folded Spill
+# Round 10: product buffer at 328(%esp).
+	movl	%ecx, %eax
+	imull	72(%esp), %eax          # 4-byte Folded Reload
+	movl	%eax, (%esp)
+	leal	328(%esp), %ecx
+	movl	920(%esp), %eax
+	movl	%eax, %edx
+	movl	84(%esp), %ebx          # 4-byte Reload
+	calll	.LmulPv416x32
+	movl	104(%esp), %eax         # 4-byte Reload
+	addl	328(%esp), %eax
+	movl	108(%esp), %ecx         # 4-byte Reload
+	adcl	332(%esp), %ecx
+	movl	124(%esp), %eax         # 4-byte Reload
+	adcl	336(%esp), %eax
+	movl	%eax, 124(%esp)         # 4-byte Spill
+	movl	120(%esp), %eax         # 4-byte Reload
+	adcl	340(%esp), %eax
+	movl	%eax, 120(%esp)         # 4-byte Spill
+	adcl	344(%esp), %ebp
+	movl	%ebp, 156(%esp)         # 4-byte Spill
+	adcl	348(%esp), %edi
+	movl	%edi, 140(%esp)         # 4-byte Spill
+	movl	152(%esp), %eax         # 4-byte Reload
+	adcl	352(%esp), %eax
+	movl	%eax, 152(%esp)         # 4-byte Spill
+	movl	148(%esp), %eax         # 4-byte Reload
+	adcl	356(%esp), %eax
+	movl	%eax, 148(%esp)         # 4-byte Spill
+	adcl	360(%esp), %esi
+	movl	%esi, 128(%esp)         # 4-byte Spill
+	movl	132(%esp), %eax         # 4-byte Reload
+	adcl	364(%esp), %eax
+	movl	%eax, 132(%esp)         # 4-byte Spill
+	movl	144(%esp), %eax         # 4-byte Reload
+	adcl	368(%esp), %eax
+	movl	%eax, 144(%esp)         # 4-byte Spill
+	movl	136(%esp), %eax         # 4-byte Reload
+	adcl	372(%esp), %eax
+	movl	%eax, 136(%esp)         # 4-byte Spill
+	movl	116(%esp), %eax         # 4-byte Reload
+	adcl	376(%esp), %eax
+	movl	%eax, 116(%esp)         # 4-byte Spill
+	movl	112(%esp), %eax         # 4-byte Reload
+	adcl	380(%esp), %eax
+	movl	%eax, 112(%esp)         # 4-byte Spill
+	adcl	$0, 96(%esp)            # 4-byte Folded Spill
+	adcl	$0, 100(%esp)           # 4-byte Folded Spill
+	adcl	$0, 92(%esp)            # 4-byte Folded Spill
+	movl	80(%esp), %ebp          # 4-byte Reload
+	adcl	$0, %ebp
+# Round 11: product buffer at 272(%esp).
+	movl	%ecx, %edi
+	movl	%edi, %eax
+	movl	72(%esp), %esi          # 4-byte Reload
+	imull	%esi, %eax
+	movl	%eax, (%esp)
+	leal	272(%esp), %ecx
+	movl	920(%esp), %edx
+	movl	84(%esp), %ebx          # 4-byte Reload
+	calll	.LmulPv416x32
+	addl	272(%esp), %edi
+	movl	124(%esp), %eax         # 4-byte Reload
+	adcl	276(%esp), %eax
+	movl	120(%esp), %edi         # 4-byte Reload
+	adcl	280(%esp), %edi
+	movl	156(%esp), %ecx         # 4-byte Reload
+	adcl	284(%esp), %ecx
+	movl	%ecx, 156(%esp)         # 4-byte Spill
+	movl	140(%esp), %ecx         # 4-byte Reload
+	adcl	288(%esp), %ecx
+	movl	%ecx, 140(%esp)         # 4-byte Spill
+	movl	152(%esp), %ecx         # 4-byte Reload
+	adcl	292(%esp), %ecx
+	movl	%ecx, 152(%esp)         # 4-byte Spill
+	movl	148(%esp), %ecx         # 4-byte Reload
+	adcl	296(%esp), %ecx
+	movl	%ecx, 148(%esp)         # 4-byte Spill
+	movl	128(%esp), %ecx         # 4-byte Reload
+	adcl	300(%esp), %ecx
+	movl	%ecx, 128(%esp)         # 4-byte Spill
+	movl	132(%esp), %ecx         # 4-byte Reload
+	adcl	304(%esp), %ecx
+	movl	%ecx, 132(%esp)         # 4-byte Spill
+	movl	144(%esp), %ecx         # 4-byte Reload
+	adcl	308(%esp), %ecx
+	movl	%ecx, 144(%esp)         # 4-byte Spill
+	movl	136(%esp), %ecx         # 4-byte Reload
+	adcl	312(%esp), %ecx
+	movl	%ecx, 136(%esp)         # 4-byte Spill
+	movl	116(%esp), %ecx         # 4-byte Reload
+	adcl	316(%esp), %ecx
+	movl	%ecx, 116(%esp)         # 4-byte Spill
+	movl	112(%esp), %ecx         # 4-byte Reload
+	adcl	320(%esp), %ecx
+	movl	%ecx, 112(%esp)         # 4-byte Spill
+	movl	96(%esp), %ecx          # 4-byte Reload
+	adcl	324(%esp), %ecx
+	movl	%ecx, 96(%esp)          # 4-byte Spill
+	adcl	$0, 100(%esp)           # 4-byte Folded Spill
+	adcl	$0, 92(%esp)            # 4-byte Folded Spill
+	adcl	$0, %ebp
+	movl	%ebp, 80(%esp)          # 4-byte Spill
+# Round 12: product buffer at 216(%esp).
+	movl	%eax, %ebp
+	imull	%esi, %eax
+	movl	%eax, (%esp)
+	leal	216(%esp), %ecx
+	movl	920(%esp), %edx
+	movl	84(%esp), %ebx          # 4-byte Reload
+	calll	.LmulPv416x32
+	addl	216(%esp), %ebp
+	movl	%edi, %ecx
+	adcl	220(%esp), %ecx
+	movl	156(%esp), %eax         # 4-byte Reload
+	adcl	224(%esp), %eax
+	movl	%eax, 156(%esp)         # 4-byte Spill
+	movl	140(%esp), %ebp         # 4-byte Reload
+	adcl	228(%esp), %ebp
+	movl	152(%esp), %eax         # 4-byte Reload
+	adcl	232(%esp), %eax
+	movl	%eax, 152(%esp)         # 4-byte Spill
+	movl	148(%esp), %eax         # 4-byte Reload
+	adcl	236(%esp), %eax
+	movl	%eax, 148(%esp)         # 4-byte Spill
+	movl	128(%esp), %eax         # 4-byte Reload
+	adcl	240(%esp), %eax
+	movl	%eax, 128(%esp)         # 4-byte Spill
+	movl	132(%esp), %edi         # 4-byte Reload
+	adcl	244(%esp), %edi
+	movl	144(%esp), %eax         # 4-byte Reload
+	adcl	248(%esp), %eax
+	movl	%eax, 144(%esp)         # 4-byte Spill
+	movl	136(%esp), %eax         # 4-byte Reload
+	adcl	252(%esp), %eax
+	movl	%eax, 136(%esp)         # 4-byte Spill
+	movl	116(%esp), %eax         # 4-byte Reload
+	adcl	256(%esp), %eax
+	movl	%eax, 116(%esp)         # 4-byte Spill
+	movl	112(%esp), %eax         # 4-byte Reload
+	adcl	260(%esp), %eax
+	movl	%eax, 112(%esp)         # 4-byte Spill
+	movl	96(%esp), %eax          # 4-byte Reload
+	adcl	264(%esp), %eax
+	movl	%eax, 96(%esp)          # 4-byte Spill
+	movl	100(%esp), %eax         # 4-byte Reload
+	adcl	268(%esp), %eax
+	movl	%eax, 100(%esp)         # 4-byte Spill
+	adcl	$0, 92(%esp)            # 4-byte Folded Spill
+	adcl	$0, 80(%esp)            # 4-byte Folded Spill
+# Round 13 (final): product buffer at 160(%esp).
+	movl	%esi, %eax
+	imull	%ecx, %eax
+	movl	%ecx, %esi
+	movl	%eax, (%esp)
+	leal	160(%esp), %ecx
+	movl	920(%esp), %edx
+	movl	84(%esp), %ebx          # 4-byte Reload
+	calll	.LmulPv416x32
+	addl	160(%esp), %esi
+	movl	156(%esp), %eax         # 4-byte Reload
+	adcl	164(%esp), %eax
+	movl	%eax, 156(%esp)         # 4-byte Spill
+	adcl	168(%esp), %ebp
+	movl	%ebp, 140(%esp)         # 4-byte Spill
+	movl	%ebp, %ebx
+	movl	152(%esp), %ecx         # 4-byte Reload
+	adcl	172(%esp), %ecx
+	movl	%ecx, 152(%esp)         # 4-byte Spill
+	movl	148(%esp), %ebp         # 4-byte Reload
+	adcl	176(%esp), %ebp
+	movl	%ebp, 148(%esp)         # 4-byte Spill
+	movl	128(%esp), %eax         # 4-byte Reload
+	adcl	180(%esp), %eax
+	movl	%eax, 128(%esp)         # 4-byte Spill
+	movl	%eax, %edx
+	movl	%edi, %eax
+	adcl	184(%esp), %eax
+	movl	%eax, 132(%esp)         # 4-byte Spill
+	movl	144(%esp), %eax         # 4-byte Reload
+	adcl	188(%esp), %eax
+	movl	%eax, 144(%esp)         # 4-byte Spill
+	movl	136(%esp), %eax         # 4-byte Reload
+	adcl	192(%esp), %eax
+	movl	%eax, 136(%esp)         # 4-byte Spill
+	movl	116(%esp), %eax         # 4-byte Reload
+	adcl	196(%esp), %eax
+	movl	%eax, 116(%esp)         # 4-byte Spill
+	movl	112(%esp), %eax         # 4-byte Reload
+	adcl	200(%esp), %eax
+	movl	%eax, 112(%esp)         # 4-byte Spill
+	movl	96(%esp), %eax          # 4-byte Reload
+	adcl	204(%esp), %eax
+	movl	%eax, 96(%esp)          # 4-byte Spill
+	movl	100(%esp), %eax         # 4-byte Reload
+	adcl	208(%esp), %eax
+	movl	%eax, 100(%esp)         # 4-byte Spill
+	movl	92(%esp), %eax          # 4-byte Reload
+	adcl	212(%esp), %eax
+	movl	%eax, 92(%esp)          # 4-byte Spill
+	movl	80(%esp), %esi          # 4-byte Reload
+	adcl	$0, %esi
+# Final conditional subtraction: t = r - p (13-limb subl/sbbl chain).
+	movl	156(%esp), %edi         # 4-byte Reload
+	subl	12(%esp), %edi          # 4-byte Folded Reload
+	sbbl	4(%esp), %ebx           # 4-byte Folded Reload
+	sbbl	8(%esp), %ecx           # 4-byte Folded Reload
+	sbbl	16(%esp), %ebp          # 4-byte Folded Reload
+	sbbl	20(%esp), %edx          # 4-byte Folded Reload
+	movl	%edx, 72(%esp)          # 4-byte Spill
+	movl	132(%esp), %edx         # 4-byte Reload
+	sbbl	24(%esp), %edx          # 4-byte Folded Reload
+	movl	%edx, 76(%esp)          # 4-byte Spill
+	movl	144(%esp), %edx         # 4-byte Reload
+	sbbl	28(%esp), %edx          # 4-byte Folded Reload
+	movl	%edx, 80(%esp)          # 4-byte Spill
+	movl	136(%esp), %edx         # 4-byte Reload
+	sbbl	32(%esp), %edx          # 4-byte Folded Reload
+	movl	%edx, 84(%esp)          # 4-byte Spill
+	movl	116(%esp), %edx         # 4-byte Reload
+	sbbl	36(%esp), %edx          # 4-byte Folded Reload
+	movl	%edx, 88(%esp)          # 4-byte Spill
+	movl	112(%esp), %edx         # 4-byte Reload
+	sbbl	40(%esp), %edx          # 4-byte Folded Reload
+	movl	%edx, 104(%esp)         # 4-byte Spill
+	movl	96(%esp), %edx          # 4-byte Reload
+	sbbl	44(%esp), %edx          # 4-byte Folded Reload
+	movl	%edx, 108(%esp)         # 4-byte Spill
+	movl	100(%esp), %edx         # 4-byte Reload
+	sbbl	48(%esp), %edx          # 4-byte Folded Reload
+	movl	%edx, 120(%esp)         # 4-byte Spill
+	movl	%eax, %edx
+	movl	%esi, %eax
+	sbbl	52(%esp), %edx          # 4-byte Folded Reload
+	movl	%edx, 124(%esp)         # 4-byte Spill
+	sbbl	$0, %eax                # fold borrow into the top carry word
+	andl	$1, %eax                # %eax = 1 iff r < p (keep r), 0 -> use t
+# Limb-by-limb select (borrow set -> keep r, clear -> take t) and store z.
+	jne	.LBB196_2
+# BB#1:
+	movl	%ebp, 148(%esp)         # 4-byte Spill
+.LBB196_2:
+	testb	%al, %al
+	movl	156(%esp), %ebp         # 4-byte Reload
+	jne	.LBB196_4
+# BB#3:
+	movl	%edi, %ebp
+.LBB196_4:
+	movl	912(%esp), %edi
+	movl	%ebp, (%edi)
+	movl	140(%esp), %ebp         # 4-byte Reload
+	jne	.LBB196_6
+# BB#5:
+	movl	%ebx, %ebp
+.LBB196_6:
+	movl	%ebp, 4(%edi)
+	movl	152(%esp), %ebx         # 4-byte Reload
+	jne	.LBB196_8
+# BB#7:
+	movl	%ecx, %ebx
+.LBB196_8:
+	movl	%ebx, 8(%edi)
+	movl	148(%esp), %esi         # 4-byte Reload
+	movl	%esi, 12(%edi)
+	movl	116(%esp), %ebx         # 4-byte Reload
+	movl	128(%esp), %esi         # 4-byte Reload
+	jne	.LBB196_10
+# BB#9:
+	movl	72(%esp), %esi          # 4-byte Reload
+.LBB196_10:
+	movl	%esi, 16(%edi)
+	movl	112(%esp), %esi         # 4-byte Reload
+	movl	132(%esp), %edx         # 4-byte Reload
+	jne	.LBB196_12
+# BB#11:
+	movl	76(%esp), %edx          # 4-byte Reload
+.LBB196_12:
+	movl	%edx, 20(%edi)
+	movl	96(%esp), %edx          # 4-byte Reload
+	movl	144(%esp), %ecx         # 4-byte Reload
+	jne	.LBB196_14
+# BB#13:
+	movl	80(%esp), %ecx          # 4-byte Reload
+.LBB196_14:
+	movl	%ecx, 24(%edi)
+	movl	100(%esp), %ecx         # 4-byte Reload
+	movl	136(%esp), %eax         # 4-byte Reload
+	jne	.LBB196_16
+# BB#15:
+	movl	84(%esp), %eax          # 4-byte Reload
+.LBB196_16:
+	movl	%eax, 28(%edi)
+	movl	92(%esp), %eax          # 4-byte Reload
+	jne	.LBB196_18
+# BB#17:
+	movl	88(%esp), %ebx          # 4-byte Reload
+.LBB196_18:
+	movl	%ebx, 32(%edi)
+	jne	.LBB196_20
+# BB#19:
+	movl	104(%esp), %esi         # 4-byte Reload
+.LBB196_20:
+	movl	%esi, 36(%edi)
+	jne	.LBB196_22
+# BB#21:
+	movl	108(%esp), %edx         # 4-byte Reload
+.LBB196_22:
+	movl	%edx, 40(%edi)
+	jne	.LBB196_24
+# BB#23:
+	movl	120(%esp), %ecx         # 4-byte Reload
+.LBB196_24:
+	movl	%ecx, 44(%edi)
+	jne	.LBB196_26
+# BB#25:
+	movl	124(%esp), %eax         # 4-byte Reload
+.LBB196_26:
+	movl	%eax, 48(%edi)
+	addl	$892, %esp              # imm = 0x37C
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end196:
+	.size	mcl_fp_montRed13L, .Lfunc_end196-mcl_fp_montRed13L
+
+# ---------------------------------------------------------------------------
+# mcl_fp_addPre13L -- 13-limb (416-bit) addition WITHOUT modular reduction.
+# i386 cdecl; after the 3 pushes the stack args sit at:
+#   16(%esp) = destination (13 limbs written)
+#   20(%esp) = first source operand
+#   24(%esp) = second source operand
+# Returns the final carry (0 or 1) in %eax.
+# The addl/adcl chain keeps CF live from limb to limb; loads and stores
+# are interleaved with the adds, so instruction order must not change.
+# ---------------------------------------------------------------------------
+	.globl	mcl_fp_addPre13L
+	.align	16, 0x90
+	.type	mcl_fp_addPre13L,@function
+mcl_fp_addPre13L:                       # @mcl_fp_addPre13L
+# BB#0:
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	movl	24(%esp), %eax          # %eax = src B
+	movl	(%eax), %edx
+	movl	4(%eax), %esi
+	movl	20(%esp), %ecx          # %ecx = src A
+	addl	(%ecx), %edx            # limb 0 (starts the carry chain)
+	adcl	4(%ecx), %esi
+	movl	8(%eax), %edi
+	adcl	8(%ecx), %edi
+	movl	16(%esp), %ebx          # %ebx = destination
+	movl	%edx, (%ebx)
+	movl	12(%ecx), %edx
+	movl	%esi, 4(%ebx)
+	movl	16(%ecx), %esi
+	adcl	12(%eax), %edx
+	adcl	16(%eax), %esi
+	movl	%edi, 8(%ebx)
+	movl	20(%eax), %edi
+	movl	%edx, 12(%ebx)
+	movl	20(%ecx), %edx
+	adcl	%edi, %edx
+	movl	24(%eax), %edi
+	movl	%esi, 16(%ebx)
+	movl	24(%ecx), %esi
+	adcl	%edi, %esi
+	movl	28(%eax), %edi
+	movl	%edx, 20(%ebx)
+	movl	28(%ecx), %edx
+	adcl	%edi, %edx
+	movl	32(%eax), %edi
+	movl	%esi, 24(%ebx)
+	movl	32(%ecx), %esi
+	adcl	%edi, %esi
+	movl	36(%eax), %edi
+	movl	%edx, 28(%ebx)
+	movl	36(%ecx), %edx
+	adcl	%edi, %edx
+	movl	40(%eax), %edi
+	movl	%esi, 32(%ebx)
+	movl	40(%ecx), %esi
+	adcl	%edi, %esi
+	movl	44(%eax), %edi
+	movl	%edx, 36(%ebx)
+	movl	44(%ecx), %edx
+	adcl	%edi, %edx
+	movl	%esi, 40(%ebx)
+	movl	%edx, 44(%ebx)
+	movl	48(%eax), %eax
+	movl	48(%ecx), %ecx
+	adcl	%eax, %ecx              # limb 12 (top)
+	movl	%ecx, 48(%ebx)
+	sbbl	%eax, %eax              # %eax = -CF
+	andl	$1, %eax                # return carry as 0/1
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	retl
+.Lfunc_end197:
+	.size	mcl_fp_addPre13L, .Lfunc_end197-mcl_fp_addPre13L
+
+# ---------------------------------------------------------------------------
+# mcl_fp_subPre13L -- 13-limb (416-bit) subtraction WITHOUT modular reduction.
+# i386 cdecl; after the 4 pushes the stack args sit at:
+#   20(%esp) = destination (13 limbs written)
+#   24(%esp) = minuend
+#   28(%esp) = subtrahend
+# Returns the final borrow (0 or 1) in %eax.
+# The subl/sbbl chain keeps CF (borrow) live between limbs -- order matters.
+# ---------------------------------------------------------------------------
+	.globl	mcl_fp_subPre13L
+	.align	16, 0x90
+	.type	mcl_fp_subPre13L,@function
+mcl_fp_subPre13L:                       # @mcl_fp_subPre13L
+# BB#0:
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	movl	24(%esp), %ecx          # %ecx = minuend
+	movl	(%ecx), %esi
+	movl	4(%ecx), %edi
+	xorl	%eax, %eax              # %eax = 0; collects the borrow at the end
+	movl	28(%esp), %edx          # %edx = subtrahend
+	subl	(%edx), %esi            # limb 0 (starts the borrow chain)
+	sbbl	4(%edx), %edi
+	movl	8(%ecx), %ebx
+	sbbl	8(%edx), %ebx
+	movl	20(%esp), %ebp          # %ebp = destination
+	movl	%esi, (%ebp)
+	movl	12(%ecx), %esi
+	sbbl	12(%edx), %esi
+	movl	%edi, 4(%ebp)
+	movl	16(%ecx), %edi
+	sbbl	16(%edx), %edi
+	movl	%ebx, 8(%ebp)
+	movl	20(%edx), %ebx
+	movl	%esi, 12(%ebp)
+	movl	20(%ecx), %esi
+	sbbl	%ebx, %esi
+	movl	24(%edx), %ebx
+	movl	%edi, 16(%ebp)
+	movl	24(%ecx), %edi
+	sbbl	%ebx, %edi
+	movl	28(%edx), %ebx
+	movl	%esi, 20(%ebp)
+	movl	28(%ecx), %esi
+	sbbl	%ebx, %esi
+	movl	32(%edx), %ebx
+	movl	%edi, 24(%ebp)
+	movl	32(%ecx), %edi
+	sbbl	%ebx, %edi
+	movl	36(%edx), %ebx
+	movl	%esi, 28(%ebp)
+	movl	36(%ecx), %esi
+	sbbl	%ebx, %esi
+	movl	40(%edx), %ebx
+	movl	%edi, 32(%ebp)
+	movl	40(%ecx), %edi
+	sbbl	%ebx, %edi
+	movl	44(%edx), %ebx
+	movl	%esi, 36(%ebp)
+	movl	44(%ecx), %esi
+	sbbl	%ebx, %esi
+	movl	%edi, 40(%ebp)
+	movl	%esi, 44(%ebp)
+	movl	48(%edx), %edx
+	movl	48(%ecx), %ecx
+	sbbl	%edx, %ecx              # limb 12 (top)
+	movl	%ecx, 48(%ebp)
+	sbbl	$0, %eax                # %eax = -borrow
+	andl	$1, %eax                # return borrow as 0/1
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end198:
+	.size	mcl_fp_subPre13L, .Lfunc_end198-mcl_fp_subPre13L
+
+ .globl mcl_fp_shr1_13L
+ .align 16, 0x90
+ .type mcl_fp_shr1_13L,@function
+# mcl_fp_shr1_13L -- logical right shift by 1 of a 13-limb (416-bit) value.
+# Presumed C signature: void mcl_fp_shr1_13L(uint32_t *z, const uint32_t *x)
+# -- TODO confirm against mcl headers.
+# Each shrdl pulls the low bit of the next-higher limb into the top bit of
+# the current limb; the final (most significant) limb gets a plain shrl,
+# so the top bit of the result is 0.
+# i386 cdecl: after the push the args sit at 8(%esp)=z, 12(%esp)=x.
+mcl_fp_shr1_13L: # @mcl_fp_shr1_13L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax # %eax = x (source)
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl 8(%esp), %ecx # %ecx = z (destination)
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 40(%ecx)
+ movl 48(%eax), %eax
+ shrdl $1, %eax, %esi
+ movl %esi, 44(%ecx)
+ shrl %eax # top limb: shift in a zero bit
+ movl %eax, 48(%ecx)
+ popl %esi
+ retl
+.Lfunc_end199:
+ .size mcl_fp_shr1_13L, .Lfunc_end199-mcl_fp_shr1_13L
+
+ .globl mcl_fp_add13L
+ .align 16, 0x90
+ .type mcl_fp_add13L,@function
+# mcl_fp_add13L -- 13-limb modular addition.
+# Presumed C signature: void mcl_fp_add13L(uint32_t *z, const uint32_t *x,
+# const uint32_t *y, const uint32_t *p)  -- TODO confirm against mcl headers.
+# Strategy: compute t = x + y (adcl chain), write the unreduced t to z,
+# then compute t - p (sbbl chain). If the overall subtraction produced no
+# borrow (i.e. t >= p, accounting for the add's carry-out), the %nocarry
+# path overwrites z with t - p; otherwise z keeps the unreduced sum.
+# i386 cdecl: after 4 pushes + subl $40 the args sit at 60(%esp)=z,
+# 64(%esp)=x, 68(%esp)=y, 72(%esp)=p.
+mcl_fp_add13L: # @mcl_fp_add13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $40, %esp
+ movl 68(%esp), %ebp
+ movl (%ebp), %ecx
+ movl 4(%ebp), %eax
+ movl 64(%esp), %ebx
+ addl (%ebx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 4(%ebx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 8(%ebp), %eax
+ adcl 8(%ebx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 12(%ebx), %ecx
+ movl 16(%ebx), %eax
+ adcl 12(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 16(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 20(%ebx), %eax
+ adcl 20(%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 24(%ebx), %eax
+ adcl 24(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 28(%ebx), %eax
+ adcl 28(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 32(%ebx), %eax
+ adcl 32(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 36(%ebx), %ecx
+ adcl 36(%ebp), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 40(%ebx), %edi
+ adcl 40(%ebp), %edi
+ movl 44(%ebx), %edx
+ adcl 44(%ebp), %edx
+ movl 48(%ebx), %esi
+ adcl 48(%ebp), %esi
+ movl 60(%esp), %ebp # %ebp = z; store the unreduced sum first
+ movl 4(%esp), %ebx # 4-byte Reload
+ movl %ebx, (%ebp)
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%ebp)
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%ebp)
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebp)
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%ebp)
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%ebp)
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%ebp)
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%ebp)
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 32(%ebp)
+ movl %ecx, 36(%ebp)
+ movl %edi, 40(%ebp)
+ movl %edx, 44(%ebp)
+ movl %esi, 48(%ebp)
+ sbbl %eax, %eax # %eax = -(carry out of the add)
+ andl $1, %eax
+ movl 72(%esp), %ecx # %ecx = p (modulus); now compute t - p
+ subl (%ecx), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 36(%esp), %ebx # 4-byte Reload
+ sbbl 4(%ecx), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ebx # 4-byte Reload
+ sbbl 8(%ecx), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %ebx # 4-byte Reload
+ sbbl 12(%ecx), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %ebx # 4-byte Reload
+ sbbl 16(%ecx), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %ebx # 4-byte Reload
+ sbbl 20(%ecx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %ebx # 4-byte Reload
+ sbbl 24(%ecx), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ebx # 4-byte Reload
+ sbbl 28(%ecx), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %ebx # 4-byte Reload
+ sbbl 32(%ecx), %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl (%esp), %ebx # 4-byte Reload
+ sbbl 36(%ecx), %ebx
+ sbbl 40(%ecx), %edi
+ sbbl 44(%ecx), %edx
+ sbbl 48(%ecx), %esi
+ sbbl $0, %eax # fold borrow into the saved add-carry
+ testb $1, %al
+ jne .LBB200_2 # borrow set => keep unreduced sum already in z
+# BB#1: # %nocarry
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, (%ebp)
+ movl 36(%esp), %eax # 4-byte Reload
+ movl %eax, 4(%ebp)
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%ebp)
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%ebp)
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 16(%ebp)
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%ebp)
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%ebp)
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%ebp)
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 32(%ebp)
+ movl %ebx, 36(%ebp)
+ movl %edi, 40(%ebp)
+ movl %edx, 44(%ebp)
+ movl %esi, 48(%ebp)
+.LBB200_2: # %carry
+ addl $40, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end200:
+ .size mcl_fp_add13L, .Lfunc_end200-mcl_fp_add13L
+
+ .globl mcl_fp_addNF13L
+ .align 16, 0x90
+ .type mcl_fp_addNF13L,@function
+# mcl_fp_addNF13L -- 13-limb modular addition, "NF" (no final carry) variant.
+# Presumed C signature: void mcl_fp_addNF13L(uint32_t *z, const uint32_t *x,
+# const uint32_t *y, const uint32_t *p)  -- TODO confirm against mcl headers.
+# Computes s = x + y, then d = s - p, and per-limb selects s (when d is
+# negative, detected via sarl/js on the top limb of d) or d otherwise.
+# The selection is done limb-by-limb with js-guarded moves rather than a
+# single branch, so both candidate values stay live in spills.
+# i386 cdecl: after 4 pushes + subl $100 the args sit at 120(%esp)=z,
+# 124(%esp)=x, 128(%esp)=y, 132(%esp)=p.
+mcl_fp_addNF13L: # @mcl_fp_addNF13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+ movl 128(%esp), %esi
+ movl (%esi), %ecx
+ movl 4(%esi), %eax
+ movl 124(%esp), %edx
+ addl (%edx), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 4(%edx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 48(%esi), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 44(%esi), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 40(%esi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 36(%esi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 32(%esi), %ebp
+ movl 28(%esi), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ movl 20(%esi), %ebx
+ movl 16(%esi), %edi
+ movl 12(%esi), %ecx
+ movl 8(%esi), %esi
+ adcl 8(%edx), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 12(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 16(%edx), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl 20(%edx), %ebx
+ movl %ebx, 60(%esp) # 4-byte Spill
+ adcl 24(%edx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 28(%edx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 32(%edx), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 36(%edx), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 40(%edx), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%edx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 48(%edx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 132(%esp), %edx # %edx = p; compute s - p
+ movl 64(%esp), %eax # 4-byte Reload
+ subl (%edx), %eax
+ movl 68(%esp), %ebp # 4-byte Reload
+ sbbl 4(%edx), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ sbbl 8(%edx), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ sbbl 12(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 16(%edx), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ sbbl 20(%edx), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ sbbl 24(%edx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ sbbl 28(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 32(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ movl %esi, %ecx
+ movl %esi, %ebp
+ sbbl 36(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ movl %esi, %ecx
+ movl %esi, %edi
+ sbbl 40(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 48(%edx), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ sarl $31, %ebx # %ebx = sign mask of (s - p) top limb
+ testl %ebx, %ebx
+ movl 64(%esp), %edx # 4-byte Reload
+ js .LBB201_2
+# BB#1:
+ movl %eax, %edx
+.LBB201_2:
+ movl 120(%esp), %esi # %esi = z (output)
+ movl %edx, (%esi)
+ movl 68(%esp), %edx # 4-byte Reload
+ js .LBB201_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+.LBB201_4:
+ movl %edx, 4(%esi)
+ movl %edi, %edx
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB201_6
+# BB#5:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB201_6:
+ movl %eax, 8(%esi)
+ movl %ebp, %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB201_8
+# BB#7:
+ movl 8(%esp), %ebx # 4-byte Reload
+.LBB201_8:
+ movl %ebx, 12(%esi)
+ movl 96(%esp), %ebp # 4-byte Reload
+ movl 56(%esp), %ecx # 4-byte Reload
+ js .LBB201_10
+# BB#9:
+ movl 12(%esp), %ecx # 4-byte Reload
+.LBB201_10:
+ movl %ecx, 16(%esi)
+ movl 92(%esp), %ecx # 4-byte Reload
+ js .LBB201_12
+# BB#11:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB201_12:
+ movl %eax, 20(%esi)
+ movl 72(%esp), %ebx # 4-byte Reload
+ js .LBB201_14
+# BB#13:
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+.LBB201_14:
+ movl 76(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%esi)
+ js .LBB201_16
+# BB#15:
+ movl 24(%esp), %ebp # 4-byte Reload
+.LBB201_16:
+ movl %ebp, 28(%esi)
+ js .LBB201_18
+# BB#17:
+ movl 28(%esp), %ebx # 4-byte Reload
+.LBB201_18:
+ movl %ebx, 32(%esi)
+ js .LBB201_20
+# BB#19:
+ movl 32(%esp), %edi # 4-byte Reload
+.LBB201_20:
+ movl %edi, 36(%esi)
+ js .LBB201_22
+# BB#21:
+ movl 36(%esp), %edx # 4-byte Reload
+.LBB201_22:
+ movl %edx, 40(%esi)
+ js .LBB201_24
+# BB#23:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB201_24:
+ movl %ecx, 44(%esi)
+ movl 88(%esp), %eax # 4-byte Reload
+ js .LBB201_26
+# BB#25:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB201_26:
+ movl %eax, 48(%esi)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end201:
+ .size mcl_fp_addNF13L, .Lfunc_end201-mcl_fp_addNF13L
+
+ .globl mcl_fp_sub13L
+ .align 16, 0x90
+ .type mcl_fp_sub13L,@function
+# mcl_fp_sub13L -- 13-limb modular subtraction.
+# Presumed C signature: void mcl_fp_sub13L(uint32_t *z, const uint32_t *x,
+# const uint32_t *y, const uint32_t *p)  -- TODO confirm against mcl headers.
+# Computes d = x - y (sbbl chain), stores d to z, then if the subtraction
+# borrowed (d negative), the %carry path adds the modulus p back into z.
+# i386 cdecl: after 4 pushes + subl $44 the args sit at 64(%esp)=z,
+# 68(%esp)=x, 72(%esp)=y, 76(%esp)=p.
+mcl_fp_sub13L: # @mcl_fp_sub13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $44, %esp
+ movl 68(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx # %ebx accumulates the borrow
+ movl 72(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 32(%esi), %edx
+ sbbl 32(%edi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 36(%esi), %ecx
+ sbbl 36(%edi), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 40(%esi), %eax
+ sbbl 40(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 44(%esi), %ebp
+ sbbl 44(%edi), %ebp
+ movl 48(%esi), %esi
+ sbbl 48(%edi), %esi
+ sbbl $0, %ebx
+ testb $1, %bl
+ movl 64(%esp), %ebx # %ebx = z; store the raw difference
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%ebx)
+ movl %edx, 32(%ebx)
+ movl %ecx, 36(%ebx)
+ movl %eax, 40(%ebx)
+ movl %ebp, 44(%ebx)
+ movl %esi, 48(%ebx)
+ je .LBB202_2 # no borrow => difference is already reduced
+# BB#1: # %carry
+ movl %esi, %edi
+ movl 76(%esp), %esi # %esi = p; add modulus back in
+ movl 12(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 36(%esp), %ecx # 4-byte Reload
+ adcl 8(%esi), %ecx
+ movl 12(%esi), %eax
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl 40(%esi), %ecx
+ adcl (%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ movl %ecx, 40(%ebx)
+ movl 44(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 44(%ebx)
+ movl 48(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 48(%ebx)
+.LBB202_2: # %nocarry
+ addl $44, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end202:
+ .size mcl_fp_sub13L, .Lfunc_end202-mcl_fp_sub13L
+
+ .globl mcl_fp_subNF13L
+ .align 16, 0x90
+ .type mcl_fp_subNF13L,@function
+# mcl_fp_subNF13L -- 13-limb modular subtraction, branch-free "NF" variant.
+# Presumed C signature: void mcl_fp_subNF13L(uint32_t *z, const uint32_t *x,
+# const uint32_t *y, const uint32_t *p)  -- TODO confirm against mcl headers.
+# Computes d = x - y, then derives an all-ones/all-zero mask from the sign
+# of the top limb (sarl $31) and adds (p AND mask) to d, i.e. the modulus is
+# added back only when the subtraction went negative -- no data-dependent
+# branches. The shldl/roll sequence builds the per-limb masks for the two
+# lowest limbs from the same sign word.
+# i386 cdecl: after 4 pushes + subl $84 the args sit at 104(%esp)=z,
+# 108(%esp)=x, 112(%esp)=y, 116(%esp)=p.
+mcl_fp_subNF13L: # @mcl_fp_subNF13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $84, %esp
+ movl 108(%esp), %ecx
+ movl (%ecx), %edx
+ movl 4(%ecx), %eax
+ movl 112(%esp), %edi
+ subl (%edi), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 32(%ecx), %ebp
+ movl 28(%ecx), %ebx
+ movl 24(%ecx), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 24(%edi), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ sbbl 32(%edi), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ sbbl 48(%edi), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edx, %eax
+ sarl $31, %edi # %edi = 0 or -1: sign mask of the difference
+ movl %edi, %edx
+ shldl $1, %eax, %edx
+ movl 116(%esp), %esi # %esi = p (modulus)
+ movl 4(%esi), %eax
+ andl %edx, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ andl (%esi), %edx
+ movl 48(%esi), %eax
+ andl %edi, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%esi), %eax
+ andl %edi, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esi), %eax
+ andl %edi, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 36(%esi), %eax
+ andl %edi, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 32(%esi), %eax
+ andl %edi, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ andl %edi, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ andl %edi, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 20(%esi), %ebp
+ andl %edi, %ebp
+ movl 16(%esi), %ebx
+ andl %edi, %ebx
+ movl 12(%esi), %ecx
+ andl %edi, %ecx
+ roll %edi
+ andl 8(%esi), %edi
+ addl 56(%esp), %edx # 4-byte Folded Reload
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl 104(%esp), %esi # %esi = z (output)
+ movl %edx, (%esi)
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %eax, 4(%esi)
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %edi, 8(%esi)
+ adcl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ecx, 12(%esi)
+ adcl 44(%esp), %ebp # 4-byte Folded Reload
+ movl %ebx, 16(%esi)
+ movl (%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %ebp, 20(%esi)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%esi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%esi)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%esi)
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl 72(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%esi)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%esi)
+ movl %eax, 44(%esi)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%esi)
+ addl $84, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end203:
+ .size mcl_fp_subNF13L, .Lfunc_end203-mcl_fp_subNF13L
+
+ .globl mcl_fpDbl_add13L
+ .align 16, 0x90
+ .type mcl_fpDbl_add13L,@function
+# mcl_fpDbl_add13L -- 26-limb (832-bit) double-width modular addition.
+# Presumed C signature: void mcl_fpDbl_add13L(uint32_t *z, const uint32_t *x,
+# const uint32_t *y, const uint32_t *p)  -- TODO confirm against mcl headers.
+# Adds the full 26-limb values; the low 13 limbs of the sum are written to
+# z[0..12] as-is, while the high 13 limbs are conditionally reduced: p is
+# subtracted from them and the reduced value is kept only if no borrow
+# resulted (selected via the jne/testb chains), then stored to z[13..25].
+# i386 cdecl: after 4 pushes + subl $96 the args sit at 116(%esp)=z,
+# 120(%esp)=x, 124(%esp)=y, 128(%esp)=p.
+mcl_fpDbl_add13L: # @mcl_fpDbl_add13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $96, %esp
+ movl 124(%esp), %ecx
+ movl 120(%esp), %esi
+ movl 12(%esi), %edi
+ movl 16(%esi), %edx
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%esi), %ebp
+ movl 116(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%esi), %ebp
+ adcl 8(%esi), %ebx
+ adcl 12(%ecx), %edi
+ adcl 16(%ecx), %edx
+ movl %ebp, 4(%eax)
+ movl 60(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %edi, 12(%eax)
+ movl 20(%esi), %edi
+ adcl %ebx, %edi
+ movl 24(%ecx), %ebx
+ movl %edx, 16(%eax)
+ movl 24(%esi), %edx
+ adcl %ebx, %edx
+ movl 28(%ecx), %ebx
+ movl %edi, 20(%eax)
+ movl 28(%esi), %edi
+ adcl %ebx, %edi
+ movl 32(%ecx), %ebx
+ movl %edx, 24(%eax)
+ movl 32(%esi), %edx
+ adcl %ebx, %edx
+ movl 36(%ecx), %ebx
+ movl %edi, 28(%eax)
+ movl 36(%esi), %edi
+ adcl %ebx, %edi
+ movl 40(%ecx), %ebx
+ movl %edx, 32(%eax)
+ movl 40(%esi), %edx
+ adcl %ebx, %edx
+ movl 44(%ecx), %ebx
+ movl %edi, 36(%eax)
+ movl 44(%esi), %edi
+ adcl %ebx, %edi
+ movl 48(%ecx), %ebx
+ movl %edx, 40(%eax)
+ movl 48(%esi), %edx
+ adcl %ebx, %edx
+ movl 52(%ecx), %ebx
+ movl %edi, 44(%eax)
+ movl 52(%esi), %edi
+ adcl %ebx, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 56(%ecx), %edi
+ movl %edx, 48(%eax)
+ movl 56(%esi), %eax
+ adcl %edi, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 64(%ecx), %edx
+ movl 64(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 68(%ecx), %edx
+ movl 68(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 72(%ecx), %edx
+ movl 72(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%ecx), %edx
+ movl 76(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%ecx), %edx
+ movl 80(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 84(%ecx), %edx
+ movl 84(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%ecx), %edx
+ movl 88(%esi), %edi
+ adcl %edx, %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 92(%ecx), %edx
+ movl 92(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 96(%ecx), %edx
+ movl 96(%esi), %ebx
+ adcl %edx, %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 100(%ecx), %ecx
+ movl 100(%esi), %esi
+ adcl %ecx, %esi
+ sbbl %edx, %edx # %edx = -(carry out of the 26-limb add)
+ andl $1, %edx
+ movl 128(%esp), %ebp # %ebp = p; trial-subtract from the high half
+ movl 76(%esp), %ecx # 4-byte Reload
+ subl (%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 4(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 8(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ sbbl 20(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebp), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ sbbl 36(%ebp), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ sbbl 40(%ebp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl %esi, %ebx
+ sbbl 44(%ebp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ebx, %ecx
+ sbbl 48(%ebp), %ecx
+ sbbl $0, %edx # fold borrow into the saved add-carry
+ andl $1, %edx
+ jne .LBB204_2
+# BB#1:
+ movl %ecx, %ebx
+.LBB204_2:
+ testb %dl, %dl # dl==0 => use the reduced (subtracted) values
+ movl 76(%esp), %ecx # 4-byte Reload
+ movl 72(%esp), %edx # 4-byte Reload
+ movl 68(%esp), %esi # 4-byte Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB204_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+ movl 4(%esp), %esi # 4-byte Reload
+ movl 8(%esp), %edi # 4-byte Reload
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB204_4:
+ movl 116(%esp), %eax # %eax = z; store the selected high half
+ movl %ecx, 52(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ movl 84(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ movl 88(%esp), %ecx # 4-byte Reload
+ movl %ecx, 64(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ movl %ecx, 68(%eax)
+ movl %ebp, 72(%eax)
+ movl %edi, 76(%eax)
+ movl %esi, 80(%eax)
+ movl %edx, 84(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl 52(%esp), %edx # 4-byte Reload
+ movl 48(%esp), %esi # 4-byte Reload
+ jne .LBB204_6
+# BB#5:
+ movl 36(%esp), %esi # 4-byte Reload
+.LBB204_6:
+ movl %esi, 88(%eax)
+ jne .LBB204_8
+# BB#7:
+ movl 40(%esp), %edx # 4-byte Reload
+.LBB204_8:
+ movl %edx, 92(%eax)
+ jne .LBB204_10
+# BB#9:
+ movl 44(%esp), %ecx # 4-byte Reload
+.LBB204_10:
+ movl %ecx, 96(%eax)
+ movl %ebx, 100(%eax)
+ addl $96, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end204:
+ .size mcl_fpDbl_add13L, .Lfunc_end204-mcl_fpDbl_add13L
+
+ .globl mcl_fpDbl_sub13L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub13L,@function
+# mcl_fpDbl_sub13L -- 26-limb (832-bit) double-width modular subtraction.
+# Presumed C signature: void mcl_fpDbl_sub13L(uint32_t *z, const uint32_t *x,
+# const uint32_t *y, const uint32_t *p)  -- TODO confirm against mcl headers.
+# Subtracts the full 26-limb values; the low 13 limbs go to z[0..12] as-is.
+# For the high half, each limb of p (or 0, when there was no borrow) is
+# selected through the jne/testb ladder below and then added back into the
+# high 13 limbs, which are stored to z[13..25].
+# i386 cdecl: after 4 pushes + subl $84 the args sit at 104(%esp)=z,
+# 108(%esp)=x, 112(%esp)=y, 116(%esp)=p.
+mcl_fpDbl_sub13L: # @mcl_fpDbl_sub13L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $84, %esp
+ movl 108(%esp), %edi
+ movl (%edi), %eax
+ movl 4(%edi), %edx
+ movl 112(%esp), %ebx
+ subl (%ebx), %eax
+ sbbl 4(%ebx), %edx
+ movl 8(%edi), %esi
+ sbbl 8(%ebx), %esi
+ movl 104(%esp), %ecx # %ecx = z (output)
+ movl %eax, (%ecx)
+ movl 12(%edi), %eax
+ sbbl 12(%ebx), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%edi), %edx
+ sbbl 16(%ebx), %edx
+ movl %esi, 8(%ecx)
+ movl 20(%ebx), %esi
+ movl %eax, 12(%ecx)
+ movl 20(%edi), %eax
+ sbbl %esi, %eax
+ movl 24(%ebx), %esi
+ movl %edx, 16(%ecx)
+ movl 24(%edi), %edx
+ sbbl %esi, %edx
+ movl 28(%ebx), %esi
+ movl %eax, 20(%ecx)
+ movl 28(%edi), %eax
+ sbbl %esi, %eax
+ movl 32(%ebx), %esi
+ movl %edx, 24(%ecx)
+ movl 32(%edi), %edx
+ sbbl %esi, %edx
+ movl 36(%ebx), %esi
+ movl %eax, 28(%ecx)
+ movl 36(%edi), %eax
+ sbbl %esi, %eax
+ movl 40(%ebx), %esi
+ movl %edx, 32(%ecx)
+ movl 40(%edi), %edx
+ sbbl %esi, %edx
+ movl 44(%ebx), %esi
+ movl %eax, 36(%ecx)
+ movl 44(%edi), %eax
+ sbbl %esi, %eax
+ movl 48(%ebx), %esi
+ movl %edx, 40(%ecx)
+ movl 48(%edi), %edx
+ sbbl %esi, %edx
+ movl 52(%ebx), %esi
+ movl %eax, 44(%ecx)
+ movl 52(%edi), %eax
+ sbbl %esi, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 56(%ebx), %eax
+ movl %edx, 48(%ecx)
+ movl 56(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 60(%ebx), %eax
+ movl 60(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 64(%ebx), %eax
+ movl 64(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 68(%ebx), %eax
+ movl 68(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 72(%ebx), %eax
+ movl 72(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 76(%ebx), %eax
+ movl 76(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 80(%ebx), %eax
+ movl 80(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 84(%ebx), %eax
+ movl 84(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 88(%ebx), %eax
+ movl 88(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 92(%ebx), %eax
+ movl 92(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 96(%ebx), %eax
+ movl 96(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 100(%ebx), %eax
+ movl 100(%edi), %edx
+ sbbl %eax, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl $0, %eax
+ sbbl $0, %eax # %eax = borrow out of the 26-limb subtract
+ andl $1, %eax
+ movl 116(%esp), %edi # %edi = p; pick p[i] or 0 per the borrow flag
+ jne .LBB205_1
+# BB#2:
+ movl $0, 44(%esp) # 4-byte Folded Spill
+ jmp .LBB205_3
+.LBB205_1:
+ movl 48(%edi), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+.LBB205_3:
+ testb %al, %al
+ jne .LBB205_4
+# BB#5:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ movl $0, %ebx
+ jmp .LBB205_6
+.LBB205_4:
+ movl (%edi), %ebx
+ movl 4(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB205_6:
+ jne .LBB205_7
+# BB#8:
+ movl $0, 24(%esp) # 4-byte Folded Spill
+ jmp .LBB205_9
+.LBB205_7:
+ movl 44(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+.LBB205_9:
+ jne .LBB205_10
+# BB#11:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB205_12
+.LBB205_10:
+ movl 40(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB205_12:
+ jne .LBB205_13
+# BB#14:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB205_15
+.LBB205_13:
+ movl 36(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB205_15:
+ jne .LBB205_16
+# BB#17:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB205_18
+.LBB205_16:
+ movl 32(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB205_18:
+ jne .LBB205_19
+# BB#20:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB205_21
+.LBB205_19:
+ movl 28(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB205_21:
+ jne .LBB205_22
+# BB#23:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB205_24
+.LBB205_22:
+ movl 24(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB205_24:
+ jne .LBB205_25
+# BB#26:
+ movl $0, %eax
+ jmp .LBB205_27
+.LBB205_25:
+ movl 20(%edi), %eax
+.LBB205_27:
+ jne .LBB205_28
+# BB#29:
+ movl $0, %edx
+ jmp .LBB205_30
+.LBB205_28:
+ movl 16(%edi), %edx
+.LBB205_30:
+ jne .LBB205_31
+# BB#32:
+ movl $0, %esi
+ jmp .LBB205_33
+.LBB205_31:
+ movl 12(%edi), %esi
+.LBB205_33:
+ jne .LBB205_34
+# BB#35:
+ xorl %edi, %edi
+ jmp .LBB205_36
+.LBB205_34:
+ movl 8(%edi), %edi
+.LBB205_36:
+ addl 36(%esp), %ebx # 4-byte Folded Reload
+ movl 16(%esp), %ebp # 4-byte Reload
+ adcl 28(%esp), %ebp # 4-byte Folded Reload
+ movl %ebx, 52(%ecx)
+ adcl 32(%esp), %edi # 4-byte Folded Reload
+ movl %ebp, 56(%ecx)
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 60(%ecx)
+ adcl 48(%esp), %edx # 4-byte Folded Reload
+ movl %esi, 64(%ecx)
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 68(%ecx)
+ movl (%esp), %edx # 4-byte Reload
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 72(%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 76(%ecx)
+ movl 8(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 80(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 84(%ecx)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 88(%ecx)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 92(%ecx)
+ movl %eax, 96(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%ecx)
+ addl $84, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end205:
+ .size mcl_fpDbl_sub13L, .Lfunc_end205-mcl_fpDbl_sub13L
+
+ .align 16, 0x90
+ .type .LmulPv448x32,@function
+# .LmulPv448x32 -- internal helper: 14-limb (448-bit) x 32-bit multiply.
+# Local (non-exported) routine with a custom calling convention, as used by
+# the mcl_fp_mulUnitPre14L caller below:
+#   %ecx = result pointer (15 x 32-bit words written, offsets 0..56)
+#   %edx = source pointer (14 limbs)
+#   116(%esp) = 32-bit multiplier (first stack arg; 4(%esp) at entry,
+#               before the four pushes and the subl $96)
+#   returns the result pointer in %eax.
+# All 14 mull's run first (results spilled), then a single adcl chain folds
+# the cross terms; the 15th word receives the final carry-out.
+.LmulPv448x32: # @mulPv448x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $96, %esp
+ movl %edx, %edi
+ movl 116(%esp), %esi # %esi = multiplier word
+ movl %esi, %eax
+ mull 52(%edi)
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 48(%edi)
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 44(%edi)
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 40(%edi)
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 36(%edi)
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 32(%edi)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 28(%edi)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 24(%edi)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 20(%edi)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 16(%edi)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 12(%edi)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 8(%edi)
+ movl %edx, %ebx
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 4(%edi)
+ movl %edx, %ebp
+ movl %eax, (%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull (%edi)
+ movl %eax, (%ecx) # low word of the product
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 8(%ecx)
+ adcl 8(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ecx)
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%ecx)
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ecx)
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl $0, %eax # top word: final carry only
+ movl %eax, 56(%ecx)
+ movl %ecx, %eax # return the result pointer
+ addl $96, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end206:
+ .size .LmulPv448x32, .Lfunc_end206-.LmulPv448x32
+
+ .globl mcl_fp_mulUnitPre14L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre14L,@function
+# mcl_fp_mulUnitPre14L -- multiply a 14-limb (448-bit) value by a 32-bit word.
+# Presumed C signature: void mcl_fp_mulUnitPre14L(uint32_t *z,
+# const uint32_t *x, uint32_t y)  -- TODO confirm against mcl headers.
+# Sets up the PIC base in %ebx (call/pop + GOT add, required because
+# .LmulPv448x32 is reached through position-independent code), invokes
+# .LmulPv448x32 with %edx = x, %ecx = a 60-byte temp at 48(%esp), and the
+# multiplier on the stack, then copies the 15-word product to z.
+# i386 cdecl: after 4 pushes + subl $108 the args sit at 128(%esp)=z,
+# 132(%esp)=x, 136(%esp)=y.
+mcl_fp_mulUnitPre14L: # @mcl_fp_mulUnitPre14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $108, %esp
+ calll .L207$pb # PIC idiom: load own address
+.L207$pb:
+ popl %ebx
+.Ltmp38:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp38-.L207$pb), %ebx
+ movl 136(%esp), %eax
+ movl %eax, (%esp) # pass multiplier word on the stack
+ leal 48(%esp), %ecx # %ecx = temp result buffer
+ movl 132(%esp), %edx # %edx = source limbs
+ calll .LmulPv448x32
+ movl 104(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 72(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp
+ movl 64(%esp), %ebx
+ movl 60(%esp), %edi
+ movl 56(%esp), %esi
+ movl 48(%esp), %edx
+ movl 52(%esp), %ecx
+ movl 128(%esp), %eax # %eax = z; copy the 15-word product out
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ addl $108, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end207:
+ .size mcl_fp_mulUnitPre14L, .Lfunc_end207-mcl_fp_mulUnitPre14L
+
+#-----------------------------------------------------------------------
+# mcl_fpDbl_mulPre14L(z, x, y)
+#   Stack args (i386 SysV, %ebp frame): 8(%ebp)=z (28-limb output),
+#   12(%ebp)=x, 16(%ebp)=y (14-limb inputs) — signature inferred from
+#   the argument usage below; TODO confirm against mcl's C prototype.
+#   One-level Karatsuba step over 7-limb halves: three calls to
+#   mcl_fpDbl_mulPre7L compute lo(x)*lo(y) -> z[0..13],
+#   hi(x)*hi(y) -> z[14..27], and (lo(x)+hi(x))*(lo(y)+hi(y)) into a
+#   local buffer; the middle product is then corrected by subtracting
+#   the low and high products and added into z[7..20] with carry
+#   propagation through z[21..27].
+#   Compiler-generated (LLVM).  NOTE(review): carries from the half-sum
+#   additions are preserved across intervening code with the
+#   seto/lahf ... addb $127,%al/sahf save/restore idiom; statement
+#   order is load-bearing throughout.
+#-----------------------------------------------------------------------
+ .globl mcl_fpDbl_mulPre14L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre14L,@function
+mcl_fpDbl_mulPre14L: # @mcl_fpDbl_mulPre14L
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $268, %esp # imm = 0x10C
+# PIC base into %ebx (needed for the @PLT calls), spilled for reuse.
+ calll .L208$pb
+.L208$pb:
+ popl %ebx
+.Ltmp39:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp39-.L208$pb), %ebx
+ movl %ebx, -192(%ebp) # 4-byte Spill
+# Call 1: lo(x) * lo(y) -> z[0..13].
+ movl 16(%ebp), %esi
+ movl %esi, 8(%esp)
+ movl 12(%ebp), %edi
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre7L@PLT
+# Call 2: hi(x) * hi(y) -> z[14..27]  (halves start 28 bytes in,
+# result starts 56 bytes into z).
+ leal 28(%esi), %eax
+ movl %eax, 8(%esp)
+ leal 28(%edi), %eax
+ movl %eax, 4(%esp)
+ movl 8(%ebp), %eax
+ leal 56(%eax), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre7L@PLT
+# Compute the 7-limb half-sum lo(x)+hi(x); the final carry-out is
+# saved via seto/lahf below and restored later.
+ movl 44(%edi), %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl 40(%edi), %eax
+ movl 36(%edi), %edx
+ movl (%edi), %edi
+ movl 12(%ebp), %ecx
+ movl 4(%ecx), %ecx
+ movl 12(%ebp), %ebx
+ addl 28(%ebx), %edi
+ movl %edi, -180(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ adcl 32(%edi), %ecx
+ movl %ecx, -200(%ebp) # 4-byte Spill
+ adcl 8(%edi), %edx
+ movl %edx, -212(%ebp) # 4-byte Spill
+ adcl 12(%edi), %eax
+ movl %eax, -196(%ebp) # 4-byte Spill
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl 16(%edi), %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl %eax, %ebx
+# Save the in-progress flags (carry of the x half-sum) in %eax.
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+# Compute the 7-limb half-sum lo(y)+hi(y).
+ movl (%esi), %eax
+ addl 28(%esi), %eax
+ movl %eax, -216(%ebp) # 4-byte Spill
+ movl 4(%esi), %eax
+ adcl 32(%esi), %eax
+ movl %eax, -164(%ebp) # 4-byte Spill
+ movl 36(%esi), %eax
+ adcl 8(%esi), %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ movl 40(%esi), %eax
+ adcl 12(%esi), %eax
+ movl %eax, -172(%ebp) # 4-byte Spill
+ movl 44(%esi), %eax
+ adcl 16(%esi), %eax
+ movl %eax, -176(%ebp) # 4-byte Spill
+ movl 48(%esi), %ecx
+ adcl 20(%esi), %ecx
+ movl 52(%esi), %eax
+ adcl 24(%esi), %eax
+# Save the flags of the y half-sum (around the live %eax value).
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %esi
+ popl %eax
+ movl %esi, -220(%ebp) # 4-byte Spill
+ movl %ebx, %esi
+ movl %edx, -184(%ebp) # 4-byte Spill
+ movl -180(%ebp), %edx # 4-byte Reload
+ movl %edx, -188(%ebp) # 4-byte Spill
+ jb .LBB208_2
+# BB#1:
+# No carry out of the x half-sum: the carry-masked copies are zero.
+ xorl %esi, %esi
+ movl $0, -184(%ebp) # 4-byte Folded Spill
+ movl $0, -188(%ebp) # 4-byte Folded Spill
+.LBB208_2:
+ movl %esi, -204(%ebp) # 4-byte Spill
+ movl 52(%edi), %esi
+ movl 48(%edi), %ebx
+# Restore the saved x half-sum carry, then finish limbs 5 and 6.
+ movl -128(%ebp), %edx # 4-byte Reload
+ pushl %eax
+ movl %edx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 20(%edi), %ebx
+ movl %ebx, -160(%ebp) # 4-byte Spill
+ adcl 24(%edi), %esi
+ movl %esi, -208(%ebp) # 4-byte Spill
+ movl %eax, -148(%ebp) # 4-byte Spill
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ movl -176(%ebp), %esi # 4-byte Reload
+ movl %esi, -128(%ebp) # 4-byte Spill
+ movl -172(%ebp), %esi # 4-byte Reload
+ movl %esi, -132(%ebp) # 4-byte Spill
+ movl -168(%ebp), %esi # 4-byte Reload
+ movl %esi, -136(%ebp) # 4-byte Spill
+ movl -164(%ebp), %esi # 4-byte Reload
+ movl %esi, -140(%ebp) # 4-byte Spill
+ movl -216(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -144(%ebp) # 4-byte Spill
+ jb .LBB208_4
+# BB#3:
+# No carry out of the x half-sum: zero the other masked copies too.
+ movl $0, -148(%ebp) # 4-byte Folded Spill
+ movl $0, -152(%ebp) # 4-byte Folded Spill
+ movl $0, -128(%ebp) # 4-byte Folded Spill
+ movl $0, -132(%ebp) # 4-byte Folded Spill
+ movl $0, -136(%ebp) # 4-byte Folded Spill
+ movl $0, -140(%ebp) # 4-byte Folded Spill
+ movl $0, -144(%ebp) # 4-byte Folded Spill
+.LBB208_4:
+# Materialize the two 7-limb half-sums as in-memory operands for the
+# middle product: x-sum at -96(%ebp), y-sum at -124(%ebp).
+ movl -180(%ebp), %edx # 4-byte Reload
+ movl %edx, -96(%ebp)
+ movl -200(%ebp), %esi # 4-byte Reload
+ movl %esi, -92(%ebp)
+ movl -212(%ebp), %edx # 4-byte Reload
+ movl %edx, -88(%ebp)
+ movl -196(%ebp), %edi # 4-byte Reload
+ movl %edi, -84(%ebp)
+ movl -156(%ebp), %edx # 4-byte Reload
+ movl %edx, -80(%ebp)
+ movl %ebx, -124(%ebp)
+ movl -164(%ebp), %edx # 4-byte Reload
+ movl %edx, -120(%ebp)
+ movl -168(%ebp), %edx # 4-byte Reload
+ movl %edx, -116(%ebp)
+ movl -172(%ebp), %edx # 4-byte Reload
+ movl %edx, -112(%ebp)
+ movl -176(%ebp), %edx # 4-byte Reload
+ movl %edx, -108(%ebp)
+ movl %ecx, -104(%ebp)
+ movl %edi, %ebx
+ movl %esi, %edi
+ movl %eax, -100(%ebp)
+ sbbl %edx, %edx
+ movl -160(%ebp), %eax # 4-byte Reload
+ movl %eax, -76(%ebp)
+ movl -208(%ebp), %esi # 4-byte Reload
+ movl %esi, -72(%ebp)
+# Restore the saved y half-sum carry to gate the second masked set.
+ movl -220(%ebp), %ecx # 4-byte Reload
+ pushl %eax
+ movl %ecx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB208_6
+# BB#5:
+ movl $0, %esi
+ movl $0, %eax
+ movl $0, %ebx
+ movl $0, %edi
+.LBB208_6:
+ movl %eax, -160(%ebp) # 4-byte Spill
+ sbbl %eax, %eax
+# Call 3: (lo(x)+hi(x)) * (lo(y)+hi(y)) -> 14-limb temp at -68(%ebp).
+ leal -124(%ebp), %ecx
+ movl %ecx, 8(%esp)
+ leal -96(%ebp), %ecx
+ movl %ecx, 4(%esp)
+ leal -68(%ebp), %ecx
+ movl %ecx, (%esp)
+# Pre-compute the carry-correction sum (the masked half-sum limbs that
+# account for the carries out of the two 7-limb additions).
+ andl %eax, %edx
+ movl -188(%ebp), %eax # 4-byte Reload
+ addl %eax, -144(%ebp) # 4-byte Folded Spill
+ adcl %edi, -140(%ebp) # 4-byte Folded Spill
+ movl -184(%ebp), %eax # 4-byte Reload
+ adcl %eax, -136(%ebp) # 4-byte Folded Spill
+ adcl %ebx, -132(%ebp) # 4-byte Folded Spill
+ movl -204(%ebp), %eax # 4-byte Reload
+ adcl %eax, -128(%ebp) # 4-byte Folded Spill
+ movl -152(%ebp), %edi # 4-byte Reload
+ adcl -160(%ebp), %edi # 4-byte Folded Reload
+ adcl %esi, -148(%ebp) # 4-byte Folded Spill
+ sbbl %esi, %esi
+ andl $1, %esi
+ andl $1, %edx
+ movl %edx, -156(%ebp) # 4-byte Spill
+ movl -192(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre7L@PLT
+# Add the correction into the upper half of the middle product.
+ movl -144(%ebp), %eax # 4-byte Reload
+ addl -40(%ebp), %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ movl -140(%ebp), %eax # 4-byte Reload
+ adcl -36(%ebp), %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ movl -136(%ebp), %eax # 4-byte Reload
+ adcl -32(%ebp), %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ movl -132(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -132(%ebp) # 4-byte Spill
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -128(%ebp) # 4-byte Spill
+ adcl -20(%ebp), %edi
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ adcl %esi, -156(%ebp) # 4-byte Folded Spill
+# Subtract the low product z[0..13] from the middle product.
+ movl -68(%ebp), %eax
+ movl 8(%ebp), %esi
+ subl (%esi), %eax
+ movl %eax, -172(%ebp) # 4-byte Spill
+ movl -64(%ebp), %ecx
+ sbbl 4(%esi), %ecx
+ movl -60(%ebp), %eax
+ sbbl 8(%esi), %eax
+ movl %eax, -160(%ebp) # 4-byte Spill
+ movl -56(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl -52(%ebp), %ebx
+ sbbl 16(%esi), %ebx
+ movl -48(%ebp), %eax
+ sbbl 20(%esi), %eax
+ movl %eax, -164(%ebp) # 4-byte Spill
+ movl -44(%ebp), %eax
+ sbbl 24(%esi), %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ movl 28(%esi), %eax
+ movl %eax, -176(%ebp) # 4-byte Spill
+ sbbl %eax, -144(%ebp) # 4-byte Folded Spill
+ movl 32(%esi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ sbbl %eax, -140(%ebp) # 4-byte Folded Spill
+ movl 36(%esi), %eax
+ movl %eax, -184(%ebp) # 4-byte Spill
+ sbbl %eax, -136(%ebp) # 4-byte Folded Spill
+ movl 40(%esi), %eax
+ movl %eax, -188(%ebp) # 4-byte Spill
+ sbbl %eax, -132(%ebp) # 4-byte Folded Spill
+ movl 44(%esi), %eax
+ movl %eax, -192(%ebp) # 4-byte Spill
+ sbbl %eax, -128(%ebp) # 4-byte Folded Spill
+ movl 48(%esi), %eax
+ movl %eax, -196(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl %edi, -152(%ebp) # 4-byte Spill
+ movl 52(%esi), %eax
+ movl %eax, -200(%ebp) # 4-byte Spill
+ movl -148(%ebp), %edi # 4-byte Reload
+ sbbl %eax, %edi
+ sbbl $0, -156(%ebp) # 4-byte Folded Spill
+# Subtract the high product z[14..27] from the middle product.
+ movl 56(%esi), %eax
+ movl %eax, -228(%ebp) # 4-byte Spill
+ subl %eax, -172(%ebp) # 4-byte Folded Spill
+ movl 60(%esi), %eax
+ movl %eax, -232(%ebp) # 4-byte Spill
+ sbbl %eax, %ecx
+ movl 64(%esi), %eax
+ movl %eax, -236(%ebp) # 4-byte Spill
+ sbbl %eax, -160(%ebp) # 4-byte Folded Spill
+ movl 68(%esi), %eax
+ movl %eax, -240(%ebp) # 4-byte Spill
+ sbbl %eax, %edx
+ movl 72(%esi), %eax
+ movl %eax, -244(%ebp) # 4-byte Spill
+ sbbl %eax, %ebx
+ movl 76(%esi), %eax
+ movl %eax, -248(%ebp) # 4-byte Spill
+ sbbl %eax, -164(%ebp) # 4-byte Folded Spill
+ movl 80(%esi), %eax
+ movl %eax, -252(%ebp) # 4-byte Spill
+ sbbl %eax, -168(%ebp) # 4-byte Folded Spill
+ movl 84(%esi), %eax
+ movl %eax, -256(%ebp) # 4-byte Spill
+ sbbl %eax, -144(%ebp) # 4-byte Folded Spill
+ movl 88(%esi), %eax
+ movl %eax, -208(%ebp) # 4-byte Spill
+ sbbl %eax, -140(%ebp) # 4-byte Folded Spill
+ movl 92(%esi), %eax
+ movl %eax, -212(%ebp) # 4-byte Spill
+ sbbl %eax, -136(%ebp) # 4-byte Folded Spill
+ movl 96(%esi), %eax
+ movl %eax, -216(%ebp) # 4-byte Spill
+ sbbl %eax, -132(%ebp) # 4-byte Folded Spill
+ movl 100(%esi), %eax
+ movl %eax, -220(%ebp) # 4-byte Spill
+ sbbl %eax, -128(%ebp) # 4-byte Folded Spill
+ movl 104(%esi), %eax
+ movl %eax, -224(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ movl 108(%esi), %eax
+ movl %eax, -204(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl %edi, -148(%ebp) # 4-byte Spill
+ movl -156(%ebp), %edi # 4-byte Reload
+ sbbl $0, %edi
+# Add the corrected middle product into z[7..20] and propagate the
+# final carry through z[21..27].
+ movl -172(%ebp), %eax # 4-byte Reload
+ addl -176(%ebp), %eax # 4-byte Folded Reload
+ adcl -180(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%esi)
+ movl -160(%ebp), %eax # 4-byte Reload
+ adcl -184(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%esi)
+ adcl -188(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 36(%esi)
+ adcl -192(%ebp), %ebx # 4-byte Folded Reload
+ movl %edx, 40(%esi)
+ movl -164(%ebp), %eax # 4-byte Reload
+ adcl -196(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 44(%esi)
+ movl -168(%ebp), %ecx # 4-byte Reload
+ adcl -200(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 48(%esi)
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl -228(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ movl -140(%ebp), %ecx # 4-byte Reload
+ adcl -232(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 56(%esi)
+ movl -136(%ebp), %eax # 4-byte Reload
+ adcl -236(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 60(%esi)
+ movl -132(%ebp), %ecx # 4-byte Reload
+ adcl -240(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 64(%esi)
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -244(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 68(%esi)
+ movl -152(%ebp), %ecx # 4-byte Reload
+ adcl -248(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 72(%esi)
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -252(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 76(%esi)
+ adcl -256(%ebp), %edi # 4-byte Folded Reload
+ movl %eax, 80(%esi)
+ movl %edi, 84(%esi)
+ movl -208(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 88(%esi)
+ movl -212(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 92(%esi)
+ movl -216(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 96(%esi)
+ movl -220(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 100(%esi)
+ movl -224(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 104(%esi)
+ movl -204(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 108(%esi)
+ addl $268, %esp # imm = 0x10C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end208:
+ .size mcl_fpDbl_mulPre14L, .Lfunc_end208-mcl_fpDbl_mulPre14L
+
<doc_update>
+#-----------------------------------------------------------------------
+# mcl_fpDbl_sqrPre14L(y, x)
+#   Stack args (i386 SysV, %ebp frame): 8(%ebp)=y (28-limb output),
+#   12(%ebp)=x (14-limb input) — signature inferred from usage; TODO
+#   confirm against mcl's C prototype.
+#   28-limb square via the same one-level Karatsuba shape as
+#   mcl_fpDbl_mulPre14L, specialized for squaring: three calls to
+#   mcl_fpDbl_mulPre7L compute lo(x)^2 -> y[0..13], hi(x)^2 ->
+#   y[14..27], and (lo(x)+hi(x))^2 (the same half-sum buffer is passed
+#   as both operands), then the low/high squares are subtracted from
+#   the middle product and the result folded into y[7..20] with carry
+#   propagation through y[21..27].
+#   Compiler-generated (LLVM).  NOTE(review): the carry out of the
+#   half-sum addition is captured with seto/lahf (spilled several
+#   times — the flag state is identical in each copy, since seto/lahf/
+#   mov do not modify flags) and each copy later gates one
+#   shldl $1 block; those blocks appear to build the doubled
+#   carry-correction term 2*sum — derivation not re-verified here.
+#-----------------------------------------------------------------------
+ .globl mcl_fpDbl_sqrPre14L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre14L,@function
+mcl_fpDbl_sqrPre14L: # @mcl_fpDbl_sqrPre14L
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $268, %esp # imm = 0x10C
+# PIC base into %ebx for the @PLT calls.
+ calll .L209$pb
+.L209$pb:
+ popl %ebx
+.Ltmp40:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp40-.L209$pb), %ebx
+ movl %ebx, -172(%ebp) # 4-byte Spill
+# Call 1: lo(x)*lo(x) -> y[0..13] (same pointer for both operands).
+ movl 12(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %esi
+ movl %esi, (%esp)
+ calll mcl_fpDbl_mulPre7L@PLT
+# Call 2: hi(x)*hi(x) -> y[14..27].
+ leal 28(%edi), %eax
+ movl %eax, 8(%esp)
+ movl %eax, 4(%esp)
+ leal 56(%esi), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre7L@PLT
+# 7-limb half-sum lo(x)+hi(x) in %esi,%ebx,... with the carry chain
+# flags captured below.
+ movl 48(%edi), %eax
+ movl 44(%edi), %ecx
+ movl 36(%edi), %edx
+ movl (%edi), %esi
+ movl 4(%edi), %ebx
+ addl 28(%edi), %esi
+ adcl 32(%edi), %ebx
+ movl %ebx, -164(%ebp) # 4-byte Spill
+ adcl 8(%edi), %edx
+ movl %edx, -160(%ebp) # 4-byte Spill
+ movl 40(%edi), %edx
+ adcl 12(%edi), %edx
+ adcl 16(%edi), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ adcl 20(%edi), %eax
+ movl %eax, -176(%ebp) # 4-byte Spill
+ movl 52(%edi), %ecx
+ adcl 24(%edi), %ecx
+# Spill the (identical) flag state once per conditional-doubling block
+# below; seto/lahf/mov leave the flags untouched between copies.
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -184(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %edi
+ seto %al
+ lahf
+ movl %eax, %eax
+ sbbl %ebx, %ebx
+ movl %ebx, -128(%ebp) # 4-byte Spill
+# Each block below restores a flag copy (addb $127,%al / sahf) and,
+# if the half-sum carried, forms one limb of 2*sum via shldl $1.
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_1
+# BB#2:
+ movl %esi, -168(%ebp) # 4-byte Spill
+ movl $0, -132(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_3
+.LBB209_1:
+ leal (%esi,%esi), %eax
+ movl %esi, -168(%ebp) # 4-byte Spill
+ movl %eax, -132(%ebp) # 4-byte Spill
+.LBB209_3:
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ movl -180(%ebp), %ebx # 4-byte Reload
+ jb .LBB209_4
+# BB#5:
+ movl $0, -156(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_6
+.LBB209_4:
+ movl -164(%ebp), %eax # 4-byte Reload
+ movl -168(%ebp), %esi # 4-byte Reload
+ shldl $1, %esi, %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+.LBB209_6:
+ movl -176(%ebp), %edi # 4-byte Reload
+ movl -136(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_7
+# BB#8:
+ movl $0, -136(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_9
+.LBB209_7:
+ movl -160(%ebp), %eax # 4-byte Reload
+ movl -164(%ebp), %esi # 4-byte Reload
+ shldl $1, %esi, %eax
+ movl %eax, -136(%ebp) # 4-byte Spill
+.LBB209_9:
+ movl %ebx, %esi
+ movl -140(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_10
+# BB#11:
+ movl $0, -140(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_12
+.LBB209_10:
+ movl %edx, %eax
+ movl -160(%ebp), %ebx # 4-byte Reload
+ shldl $1, %ebx, %eax
+ movl %eax, -140(%ebp) # 4-byte Spill
+.LBB209_12:
+ movl -144(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_13
+# BB#14:
+ movl $0, -144(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_15
+.LBB209_13:
+ movl %esi, %eax
+ shldl $1, %edx, %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+.LBB209_15:
+ movl -148(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_16
+# BB#17:
+ movl $0, -148(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_18
+.LBB209_16:
+ movl %edi, %eax
+ shldl $1, %esi, %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+.LBB209_18:
+ movl -152(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_19
+# BB#20:
+ movl $0, -152(%ebp) # 4-byte Folded Spill
+ jmp .LBB209_21
+.LBB209_19:
+ movl %ecx, %eax
+ shldl $1, %edi, %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+.LBB209_21:
+# Materialize the half-sum twice (at -96 and -124(%ebp)) so it can be
+# passed as both operands of the middle 7-limb multiply.
+ movl -168(%ebp), %eax # 4-byte Reload
+ movl %eax, -96(%ebp)
+ movl %eax, -124(%ebp)
+ movl -164(%ebp), %eax # 4-byte Reload
+ movl %eax, -92(%ebp)
+ movl %eax, -120(%ebp)
+ movl -160(%ebp), %eax # 4-byte Reload
+ movl %eax, -88(%ebp)
+ movl %eax, -116(%ebp)
+ movl %edx, -84(%ebp)
+ movl %edx, -112(%ebp)
+ movl %esi, -80(%ebp)
+ movl %esi, -108(%ebp)
+ movl %edi, -76(%ebp)
+ movl %edi, -104(%ebp)
+ movl %ecx, -72(%ebp)
+ movl %ecx, -100(%ebp)
+ movl -184(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB209_22
+# BB#23:
+ xorl %edi, %edi
+ jmp .LBB209_24
+.LBB209_22:
+ shrl $31, %ecx
+ movl %ecx, %edi
+.LBB209_24:
+# Call 3: (lo(x)+hi(x))^2 -> 14-limb temp at -68(%ebp).
+ leal -68(%ebp), %eax
+ movl %eax, (%esp)
+ leal -96(%ebp), %eax
+ movl %eax, 4(%esp)
+ leal -124(%ebp), %eax
+ movl %eax, 8(%esp)
+ movl -128(%ebp), %esi # 4-byte Reload
+ andl $1, %esi
+ movl -172(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre7L@PLT
+# Add the doubled carry-correction term into the middle product.
+ movl -132(%ebp), %eax # 4-byte Reload
+ addl -40(%ebp), %eax
+ movl %eax, -132(%ebp) # 4-byte Spill
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl -36(%ebp), %eax
+ movl -136(%ebp), %ecx # 4-byte Reload
+ adcl -32(%ebp), %ecx
+ movl %ecx, -136(%ebp) # 4-byte Spill
+ movl -140(%ebp), %ecx # 4-byte Reload
+ adcl -28(%ebp), %ecx
+ movl %ecx, -140(%ebp) # 4-byte Spill
+ movl -144(%ebp), %ecx # 4-byte Reload
+ adcl -24(%ebp), %ecx
+ movl %ecx, -144(%ebp) # 4-byte Spill
+ movl -148(%ebp), %ecx # 4-byte Reload
+ adcl -20(%ebp), %ecx
+ movl %ecx, -148(%ebp) # 4-byte Spill
+ movl -152(%ebp), %ecx # 4-byte Reload
+ adcl -16(%ebp), %ecx
+ movl %ecx, -152(%ebp) # 4-byte Spill
+ adcl %edi, %esi
+ movl %esi, -128(%ebp) # 4-byte Spill
+# Subtract the low square y[0..13] from the middle product.
+ movl -68(%ebp), %ecx
+ movl 8(%ebp), %esi
+ subl (%esi), %ecx
+ movl %ecx, -204(%ebp) # 4-byte Spill
+ movl -64(%ebp), %edi
+ sbbl 4(%esi), %edi
+ movl -60(%ebp), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, -160(%ebp) # 4-byte Spill
+ movl -56(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl %edx, -168(%ebp) # 4-byte Spill
+ movl -52(%ebp), %ebx
+ sbbl 16(%esi), %ebx
+ movl -48(%ebp), %ecx
+ sbbl 20(%esi), %ecx
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ movl -44(%ebp), %edx
+ sbbl 24(%esi), %edx
+ movl %edx, -164(%ebp) # 4-byte Spill
+ movl 28(%esi), %edx
+ movl %edx, -176(%ebp) # 4-byte Spill
+ sbbl %edx, -132(%ebp) # 4-byte Folded Spill
+ movl 32(%esi), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ sbbl %ecx, %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl 36(%esi), %eax
+ movl %eax, -184(%ebp) # 4-byte Spill
+ sbbl %eax, -136(%ebp) # 4-byte Folded Spill
+ movl 40(%esi), %eax
+ movl %eax, -188(%ebp) # 4-byte Spill
+ sbbl %eax, -140(%ebp) # 4-byte Folded Spill
+ movl 44(%esi), %eax
+ movl %eax, -192(%ebp) # 4-byte Spill
+ sbbl %eax, -144(%ebp) # 4-byte Folded Spill
+ movl 48(%esi), %eax
+ movl %eax, -196(%ebp) # 4-byte Spill
+ sbbl %eax, -148(%ebp) # 4-byte Folded Spill
+ movl 52(%esi), %eax
+ movl %eax, -200(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ movl -128(%ebp), %ecx # 4-byte Reload
+ sbbl $0, %ecx
+# Subtract the high square y[14..27] from the middle product.
+ movl 56(%esi), %eax
+ movl %eax, -228(%ebp) # 4-byte Spill
+ movl -204(%ebp), %edx # 4-byte Reload
+ subl %eax, %edx
+ movl 60(%esi), %eax
+ movl %eax, -232(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl 64(%esi), %eax
+ movl %eax, -236(%ebp) # 4-byte Spill
+ sbbl %eax, -160(%ebp) # 4-byte Folded Spill
+ movl 68(%esi), %eax
+ movl %eax, -240(%ebp) # 4-byte Spill
+ sbbl %eax, -168(%ebp) # 4-byte Folded Spill
+ movl 72(%esi), %eax
+ movl %eax, -244(%ebp) # 4-byte Spill
+ sbbl %eax, %ebx
+ movl 76(%esi), %eax
+ movl %eax, -248(%ebp) # 4-byte Spill
+ sbbl %eax, -172(%ebp) # 4-byte Folded Spill
+ movl 80(%esi), %eax
+ movl %eax, -252(%ebp) # 4-byte Spill
+ sbbl %eax, -164(%ebp) # 4-byte Folded Spill
+ movl 84(%esi), %eax
+ movl %eax, -256(%ebp) # 4-byte Spill
+ sbbl %eax, -132(%ebp) # 4-byte Folded Spill
+ movl 88(%esi), %eax
+ movl %eax, -204(%ebp) # 4-byte Spill
+ sbbl %eax, -156(%ebp) # 4-byte Folded Spill
+ movl 92(%esi), %eax
+ movl %eax, -208(%ebp) # 4-byte Spill
+ sbbl %eax, -136(%ebp) # 4-byte Folded Spill
+ movl 96(%esi), %eax
+ movl %eax, -212(%ebp) # 4-byte Spill
+ sbbl %eax, -140(%ebp) # 4-byte Folded Spill
+ movl 100(%esi), %eax
+ movl %eax, -216(%ebp) # 4-byte Spill
+ sbbl %eax, -144(%ebp) # 4-byte Folded Spill
+ movl 104(%esi), %eax
+ movl %eax, -220(%ebp) # 4-byte Spill
+ sbbl %eax, -148(%ebp) # 4-byte Folded Spill
+ movl 108(%esi), %eax
+ movl %eax, -224(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ sbbl $0, %ecx
+ movl %ecx, -128(%ebp) # 4-byte Spill
+# Add the corrected middle product into y[7..20] and propagate the
+# final carry through y[21..27].
+ movl %edx, %eax
+ addl -176(%ebp), %eax # 4-byte Folded Reload
+ adcl -180(%ebp), %edi # 4-byte Folded Reload
+ movl %eax, 28(%esi)
+ movl -160(%ebp), %eax # 4-byte Reload
+ adcl -184(%ebp), %eax # 4-byte Folded Reload
+ movl %edi, 32(%esi)
+ movl -168(%ebp), %ecx # 4-byte Reload
+ adcl -188(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%esi)
+ adcl -192(%ebp), %ebx # 4-byte Folded Reload
+ movl %ecx, 40(%esi)
+ movl -172(%ebp), %eax # 4-byte Reload
+ adcl -196(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 44(%esi)
+ movl -164(%ebp), %ecx # 4-byte Reload
+ adcl -200(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 48(%esi)
+ movl -132(%ebp), %eax # 4-byte Reload
+ adcl -228(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ movl -156(%ebp), %edx # 4-byte Reload
+ adcl -232(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 56(%esi)
+ movl -136(%ebp), %ecx # 4-byte Reload
+ adcl -236(%ebp), %ecx # 4-byte Folded Reload
+ movl %edx, 60(%esi)
+ movl -140(%ebp), %eax # 4-byte Reload
+ adcl -240(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 64(%esi)
+ movl -144(%ebp), %ecx # 4-byte Reload
+ adcl -244(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 68(%esi)
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -248(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 72(%esi)
+ movl -152(%ebp), %ecx # 4-byte Reload
+ adcl -252(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 76(%esi)
+ movl -128(%ebp), %eax # 4-byte Reload
+ adcl -256(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 80(%esi)
+ movl %eax, 84(%esi)
+ movl -204(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 88(%esi)
+ movl -208(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 92(%esi)
+ movl -212(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 96(%esi)
+ movl -216(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 100(%esi)
+ movl -220(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 104(%esi)
+ movl -224(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 108(%esi)
+ addl $268, %esp # imm = 0x10C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end209:
+ .size mcl_fpDbl_sqrPre14L, .Lfunc_end209-mcl_fpDbl_sqrPre14L
+
+ .globl mcl_fp_mont14L
+ .align 16, 0x90
+ .type mcl_fp_mont14L,@function
+mcl_fp_mont14L: # @mcl_fp_mont14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1900, %esp # imm = 0x76C
+ calll .L210$pb
+.L210$pb:
+ popl %ebx
+.Ltmp41:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp41-.L210$pb), %ebx
+ movl 1932(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1840(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 1840(%esp), %edi
+ movl 1844(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 1896(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 1892(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 1888(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 1884(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1880(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1876(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1872(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1868(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1864(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1860(%esp), %esi
+ movl 1856(%esp), %ebp
+ movl 1852(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 1848(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1776(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ addl 1776(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1780(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1784(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1792(%esp), %ebp
+ adcl 1796(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1804(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1808(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1816(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1820(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1824(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1828(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1832(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 1928(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1712(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %edi
+ movl %edi, %edx
+ movl 100(%esp), %ecx # 4-byte Reload
+ addl 1712(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1716(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1720(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1724(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 1728(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 1732(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1736(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1740(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 1744(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1748(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1752(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1756(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1760(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1764(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1768(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1648(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ movl 100(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1648(%esp), %ebp
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1652(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1656(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1660(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1664(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl 1668(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1672(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1676(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 1680(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1684(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1688(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1692(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1696(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1700(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 1704(%esp), %esi
+ adcl $0, %eax
+ movl %eax, %edi
+ movl 1928(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1584(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 1584(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1588(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1592(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1596(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1600(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1604(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 1608(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1612(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1616(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1620(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1624(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1628(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1632(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1636(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ adcl 1640(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1520(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1520(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1528(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1532(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1540(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 1544(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1548(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1552(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 1564(%esp), %ebp
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 1568(%esp), %esi
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 1572(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1576(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1456(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 1456(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1460(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1464(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1472(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1476(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1480(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1484(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1488(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1492(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 1496(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ adcl 1500(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl 1504(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1508(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1512(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1392(%esp), %ecx
+ movl 1932(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv448x32
+ andl $1, %edi
+ movl %edi, %eax
+ addl 1392(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1396(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1400(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1404(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1408(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1412(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1416(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1420(%esp), %esi
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 1424(%esp), %ebp
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1428(%esp), %edi
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1432(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1436(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1440(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1444(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1448(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1328(%esp), %ecx
+ movl 1924(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv448x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 1328(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1352(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 1356(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ adcl 1360(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1376(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1380(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 1384(%esp), %edi
+ sbbl %esi, %esi
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1264(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1264(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 1284(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1312(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1316(%esp), %esi
+ adcl 1320(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1200(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 68(%esp), %eax # 4-byte Reload
+ addl 1200(%esp), %eax
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1204(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1208(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 1212(%esp), %edi
+ adcl 1216(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1220(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1224(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1228(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1232(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1236(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1240(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1244(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl 1248(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1252(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1256(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %eax, %ebp
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1136(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1136(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1144(%esp), %ebp
+ adcl 1148(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 1172(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 1180(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1188(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1072(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 1072(%esp), %eax
+ adcl 1076(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1080(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1084(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1088(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1092(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1096(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1100(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 1104(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1108(%esp), %ebp
+ adcl 1112(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1116(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1120(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1124(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1128(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %eax, %edi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1008(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1008(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 1020(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 1036(%esp), %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1044(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 1052(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 944(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 52(%esp), %eax # 4-byte Reload
+ addl 944(%esp), %eax
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 948(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 952(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 956(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 960(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 964(%esp), %esi
+ adcl 968(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 972(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 976(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 980(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl 984(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 988(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 992(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 996(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1000(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %eax, %edi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 880(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %ebp
+ addl 880(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 896(%esp), %edi
+ adcl 900(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 924(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 936(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 816(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 816(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 824(%esp), %ebp
+ adcl 828(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 856(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ adcl 872(%esp), %esi
+ sbbl %eax, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ movl 56(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 752(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 756(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 760(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 764(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 768(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 772(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 776(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 780(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 784(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 788(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 792(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 796(%esp), %ebp
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 800(%esp), %edi
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 804(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 808(%esp), %esi
+ adcl $0, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 688(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 688(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 728(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ adcl 732(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 740(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 624(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 624(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 636(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 648(%esp), %esi
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 652(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 560(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 568(%esp), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 580(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ adcl 584(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 592(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 496(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 508(%esp), %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 520(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 528(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 540(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 432(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 440(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl 444(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 452(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 472(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 368(%esp), %ebp
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 376(%esp), %esi
+ adcl 380(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 392(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 304(%esp), %ecx
+ adcl 308(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ adcl 312(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 324(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 328(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 240(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 240(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 248(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 252(%esp), %edi
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 256(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 264(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 268(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 1928(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 176(%esp), %ecx
+ movl 1924(%esp), %edx
+ calll .LmulPv448x32
+ movl 104(%esp), %ecx # 4-byte Reload
+ addl 176(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 184(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ adcl 188(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 192(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 200(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 1932(%esp), %edx
+ calll .LmulPv448x32
+ andl $1, %ebp
+ addl 112(%esp), %esi
+ movl 100(%esp), %esi # 4-byte Reload
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 124(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ adcl 128(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 132(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl %ecx, %ebx
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 136(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 140(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 144(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 148(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 152(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 156(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 160(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 164(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 168(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 1932(%esp), %ecx
+ subl (%ecx), %eax
+ sbbl 4(%ecx), %edx
+ sbbl 8(%ecx), %esi
+ sbbl 12(%ecx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ sbbl 16(%ecx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ sbbl 20(%ecx), %edi
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ sbbl 24(%ecx), %edi
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 60(%esp), %ebx # 4-byte Reload
+ sbbl 28(%ecx), %ebx
+ movl 52(%esp), %edi # 4-byte Reload
+ sbbl 32(%ecx), %edi
+ movl %edi, 32(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ sbbl 36(%ecx), %edi
+ movl %edi, 36(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ sbbl 40(%ecx), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ sbbl 44(%ecx), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ sbbl 48(%ecx), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ sbbl 52(%ecx), %edi
+ movl %ebp, %ecx
+ movl %edi, 104(%esp) # 4-byte Spill
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB210_2
+# BB#1:
+ movl %ebx, 60(%esp) # 4-byte Spill
+.LBB210_2:
+ testb %cl, %cl
+ movl 108(%esp), %ebx # 4-byte Reload
+ jne .LBB210_4
+# BB#3:
+ movl %eax, %ebx
+.LBB210_4:
+ movl 1920(%esp), %eax
+ movl %ebx, (%eax)
+ movl 92(%esp), %edi # 4-byte Reload
+ movl 72(%esp), %ecx # 4-byte Reload
+ jne .LBB210_6
+# BB#5:
+ movl %edx, %edi
+.LBB210_6:
+ movl %edi, 4(%eax)
+ jne .LBB210_8
+# BB#7:
+ movl %esi, 100(%esp) # 4-byte Spill
+.LBB210_8:
+ movl 100(%esp), %edx # 4-byte Reload
+ movl %edx, 8(%eax)
+ jne .LBB210_10
+# BB#9:
+ movl 16(%esp), %edx # 4-byte Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+.LBB210_10:
+ movl 84(%esp), %edx # 4-byte Reload
+ movl %edx, 12(%eax)
+ jne .LBB210_12
+# BB#11:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB210_12:
+ movl %ecx, 16(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ jne .LBB210_14
+# BB#13:
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB210_14:
+ movl %ecx, 20(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ jne .LBB210_16
+# BB#15:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB210_16:
+ movl %ecx, 24(%eax)
+ movl 60(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ jne .LBB210_18
+# BB#17:
+ movl 32(%esp), %ecx # 4-byte Reload
+.LBB210_18:
+ movl %ecx, 32(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB210_20
+# BB#19:
+ movl 36(%esp), %ecx # 4-byte Reload
+.LBB210_20:
+ movl %ecx, 36(%eax)
+ movl 64(%esp), %ecx # 4-byte Reload
+ jne .LBB210_22
+# BB#21:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB210_22:
+ movl %ecx, 40(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ jne .LBB210_24
+# BB#23:
+ movl 44(%esp), %ecx # 4-byte Reload
+.LBB210_24:
+ movl %ecx, 44(%eax)
+ movl 88(%esp), %ecx # 4-byte Reload
+ jne .LBB210_26
+# BB#25:
+ movl 48(%esp), %ecx # 4-byte Reload
+.LBB210_26:
+ movl %ecx, 48(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ jne .LBB210_28
+# BB#27:
+ movl 104(%esp), %ecx # 4-byte Reload
+.LBB210_28:
+ movl %ecx, 52(%eax)
+ addl $1900, %esp # imm = 0x76C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end210:
+ .size mcl_fp_mont14L, .Lfunc_end210-mcl_fp_mont14L
+
+ .globl mcl_fp_montNF14L
+ .align 16, 0x90
+ .type mcl_fp_montNF14L,@function
+mcl_fp_montNF14L: # @mcl_fp_montNF14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1884, %esp # imm = 0x75C
+ calll .L211$pb
+.L211$pb:
+ popl %ebx
+.Ltmp42:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp42-.L211$pb), %ebx
+ movl 1916(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1824(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1824(%esp), %edi
+ movl 1828(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 1880(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 1876(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 1872(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 1868(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 1864(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1860(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1856(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1852(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1848(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 1844(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1840(%esp), %esi
+ movl 1836(%esp), %ebp
+ movl 1832(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1760(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1760(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1764(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1768(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1772(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ adcl 1776(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1780(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1784(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1792(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1804(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 1808(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1816(%esp), %ebp
+ movl 1912(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1696(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1752(%esp), %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ addl 1696(%esp), %edx
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1700(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1704(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 1708(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1712(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1716(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1720(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1724(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1728(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1732(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1736(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 1740(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 1744(%esp), %edi
+ adcl 1748(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %ebp
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1632(%esp), %ecx
+ movl 1916(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv448x32
+ addl 1632(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1636(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1640(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1644(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1648(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1652(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1656(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1660(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 1664(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1668(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1672(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1676(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1680(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1684(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1688(%esp), %ebp
+ movl 1912(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1568(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1624(%esp), %eax
+ movl 88(%esp), %edx # 4-byte Reload
+ addl 1568(%esp), %edx
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1572(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1576(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1580(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1584(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1588(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1592(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 1596(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1600(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1604(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1608(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 1612(%esp), %edi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1616(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1620(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %ebp
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1504(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1504(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1508(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1512(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1516(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1520(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1528(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1532(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1540(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1544(%esp), %esi
+ adcl 1548(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1552(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1560(%esp), %ebp
+ movl 1912(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1440(%esp), %ecx
+ movl 1908(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv448x32
+ movl 1496(%esp), %eax
+ movl 72(%esp), %edx # 4-byte Reload
+ addl 1440(%esp), %edx
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1444(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1448(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1452(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1456(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1460(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 1464(%esp), %edi
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1468(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1472(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 1476(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1480(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1484(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1488(%esp), %esi
+ adcl 1492(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %ebp
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1376(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1376(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1380(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1384(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 1400(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1404(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1408(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1412(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1424(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1312(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1368(%esp), %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ addl 1312(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1316(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1320(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1324(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 1328(%esp), %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1332(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1336(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1340(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1344(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1348(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1352(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1356(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 1360(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1364(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1248(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1248(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1260(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 1264(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1272(%esp), %ebp
+ adcl 1276(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 1284(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 1300(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1184(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1240(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 1184(%esp), %ecx
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1188(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1200(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1204(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1208(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1212(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1216(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1232(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1120(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 1120(%esp), %esi
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 1124(%esp), %ebp
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 1128(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1148(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 1156(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1056(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 1112(%esp), %eax
+ movl %ebp, %ecx
+ addl 1056(%esp), %ecx
+ adcl 1060(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 1064(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 1068(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1072(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1076(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 1080(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 1084(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl 1088(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1092(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 1096(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 1100(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 1104(%esp), %ebp
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 1108(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %esi
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 992(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 1008(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1040(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 1044(%esp), %ebp
+ adcl 1048(%esp), %esi
+ movl 1912(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 984(%esp), %eax
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl 928(%esp), %ecx
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 932(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 936(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 940(%esp), %edi
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 944(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 948(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 952(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 956(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 960(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 964(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 968(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 972(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 976(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl 980(%esp), %esi
+ movl %esi, %ebp
+ adcl $0, %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 864(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 864(%esp), %esi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 876(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 884(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 916(%esp), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 920(%esp), %ebp
+ movl 1912(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 800(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 856(%esp), %edx
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 800(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 808(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 816(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 828(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 852(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 736(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 736(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 764(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 768(%esp), %ebp
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 772(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 780(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 672(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 728(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 672(%esp), %ecx
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 700(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ adcl 704(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 712(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 608(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 608(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 616(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 624(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 644(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 600(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 544(%esp), %ecx
+ adcl 548(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 556(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 568(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 576(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 480(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 480(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 488(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 496(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 504(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 532(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 472(%esp), %edx
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 416(%esp), %ecx
+ adcl 420(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 424(%esp), %edi
+ adcl 428(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 464(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 352(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 360(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 364(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 388(%esp), %edi
+ movl 40(%esp), %ebp # 4-byte Reload
+ adcl 392(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 288(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 344(%esp), %edx
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 288(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 296(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 320(%esp), %edi
+ adcl 324(%esp), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 328(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 224(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 224(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 232(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 256(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 260(%esp), %edi
+ adcl 264(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1912(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 160(%esp), %ecx
+ movl 1908(%esp), %edx
+ calll .LmulPv448x32
+ movl 216(%esp), %edx
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 160(%esp), %ecx
+ adcl 164(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 168(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 176(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 188(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ adcl 192(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 96(%esp), %ecx
+ movl 1916(%esp), %edx
+ calll .LmulPv448x32
+ addl 96(%esp), %esi
+ movl 64(%esp), %esi # 4-byte Reload
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 104(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl %ebp, %ebx
+ adcl 108(%esp), %esi
+ adcl 112(%esp), %edi
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 116(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 120(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 124(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %edx # 4-byte Reload
+ adcl 128(%esp), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 132(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 136(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 140(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 148(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 152(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 1916(%esp), %ebp
+ subl (%ebp), %edx
+ sbbl 4(%ebp), %ebx
+ movl %esi, %eax
+ sbbl 8(%ebp), %eax
+ movl %edi, %ecx
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ sbbl 20(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ sbbl 36(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ sbbl 40(%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 44(%ebp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 48(%ebp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 52(%ebp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ sarl $31, %ecx
+ testl %ecx, %ecx
+ movl 92(%esp), %ebp # 4-byte Reload
+ js .LBB211_2
+# BB#1:
+ movl %edx, %ebp
+.LBB211_2:
+ movl 1904(%esp), %edx
+ movl %ebp, (%edx)
+ movl 88(%esp), %ebp # 4-byte Reload
+ js .LBB211_4
+# BB#3:
+ movl %ebx, %ebp
+.LBB211_4:
+ movl %ebp, 4(%edx)
+ js .LBB211_6
+# BB#5:
+ movl %eax, %esi
+.LBB211_6:
+ movl %esi, 8(%edx)
+ js .LBB211_8
+# BB#7:
+ movl 4(%esp), %edi # 4-byte Reload
+.LBB211_8:
+ movl %edi, 12(%edx)
+ movl 68(%esp), %eax # 4-byte Reload
+ js .LBB211_10
+# BB#9:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB211_10:
+ movl %eax, 16(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB211_12
+# BB#11:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB211_12:
+ movl %eax, 20(%edx)
+ movl 44(%esp), %eax # 4-byte Reload
+ js .LBB211_14
+# BB#13:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB211_14:
+ movl %eax, 24(%edx)
+ movl 40(%esp), %eax # 4-byte Reload
+ js .LBB211_16
+# BB#15:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB211_16:
+ movl %eax, 28(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB211_18
+# BB#17:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB211_18:
+ movl %eax, 32(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB211_20
+# BB#19:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB211_20:
+ movl %eax, 36(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB211_22
+# BB#21:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB211_22:
+ movl %eax, 40(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ js .LBB211_24
+# BB#23:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB211_24:
+ movl %eax, 44(%edx)
+ movl 76(%esp), %eax # 4-byte Reload
+ js .LBB211_26
+# BB#25:
+ movl 64(%esp), %eax # 4-byte Reload
+.LBB211_26:
+ movl %eax, 48(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ js .LBB211_28
+# BB#27:
+ movl 72(%esp), %eax # 4-byte Reload
+.LBB211_28:
+ movl %eax, 52(%edx)
+ addl $1884, %esp # imm = 0x75C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end211:
+ .size mcl_fp_montNF14L, .Lfunc_end211-mcl_fp_montNF14L
+
+ .globl mcl_fp_montRed14L
+ .align 16, 0x90
+ .type mcl_fp_montRed14L,@function
+mcl_fp_montRed14L: # @mcl_fp_montRed14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1068, %esp # imm = 0x42C
+ calll .L212$pb
+.L212$pb:
+ popl %eax
+.Ltmp43:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp43-.L212$pb), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1096(%esp), %edx
+ movl -4(%edx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1092(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 92(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ imull %eax, %ebx
+ movl 108(%ecx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 104(%ecx), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 100(%ecx), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 96(%ecx), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 92(%ecx), %esi
+ movl %esi, 140(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 84(%ecx), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 80(%ecx), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 76(%ecx), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl %esi, 136(%esp) # 4-byte Spill
+ movl 68(%ecx), %esi
+ movl %esi, 168(%esp) # 4-byte Spill
+ movl 64(%ecx), %esi
+ movl %esi, 164(%esp) # 4-byte Spill
+ movl 60(%ecx), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 52(%ecx), %edi
+ movl %edi, 144(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 40(%ecx), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 36(%ecx), %ebp
+ movl 32(%ecx), %edi
+ movl 28(%ecx), %esi
+ movl 24(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 20(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 16(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 12(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 8(%ecx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl (%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 52(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 1008(%esp), %ecx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ movl 92(%esp), %eax # 4-byte Reload
+ addl 1008(%esp), %eax
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1012(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1036(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 1040(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ adcl 1044(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl 1052(%esp), %ebp
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 164(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ sbbl %edi, %edi
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 944(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ movl %edi, %ecx
+ andl $1, %ecx
+ addl 944(%esp), %esi
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 948(%esp), %edx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 976(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 984(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %esi # 4-byte Reload
+ adcl 1000(%esp), %esi
+ adcl $0, 164(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %ebp
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 880(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 880(%esp), %ebp
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 884(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 908(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %edi # 4-byte Reload
+ adcl 920(%esp), %edi
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ adcl 932(%esp), %esi
+ movl %esi, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 936(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ movl 152(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 816(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 816(%esp), %esi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 820(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 852(%esp), %edi
+ movl %edi, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 152(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 752(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 752(%esp), %ebp
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 756(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ movl 156(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 688(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 688(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 692(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 156(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ movl 140(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 624(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 624(%esp), %ebp
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 628(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %ebp # 4-byte Reload
+ adcl 664(%esp), %ebp
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 140(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 560(%esp), %esi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 564(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ adcl 596(%esp), %ebp
+ movl %ebp, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %edi # 4-byte Reload
+ adcl 600(%esp), %edi
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ movl 120(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 1096(%esp), %eax
+ movl %eax, %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 496(%esp), %ebp
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %ebp # 4-byte Reload
+ adcl 516(%esp), %ebp
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ adcl 532(%esp), %edi
+ movl %edi, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl 536(%esp), %edi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl $0, 140(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 432(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 432(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 440(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ adcl 448(%esp), %ebp
+ movl %ebp, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %ecx # 4-byte Reload
+ adcl 452(%esp), %ecx
+ movl %ecx, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %ebp # 4-byte Reload
+ adcl 456(%esp), %ebp
+ movl 164(%esp), %ecx # 4-byte Reload
+ adcl 460(%esp), %ecx
+ movl %ecx, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %ecx # 4-byte Reload
+ adcl 464(%esp), %ecx
+ movl %ecx, 168(%esp) # 4-byte Spill
+ adcl 468(%esp), %edi
+ movl %edi, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 472(%esp), %ecx
+ movl %ecx, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 484(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 488(%esp), %ecx
+ movl %ecx, 140(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %eax, %esi
+ movl 88(%esp), %edi # 4-byte Reload
+ imull %edi, %eax
+ movl %eax, (%esp)
+ leal 368(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 368(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 376(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 380(%esp), %ecx
+ movl %ecx, 144(%esp) # 4-byte Spill
+ movl 172(%esp), %esi # 4-byte Reload
+ adcl 384(%esp), %esi
+ adcl 388(%esp), %ebp
+ movl %ebp, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %ecx # 4-byte Reload
+ adcl 392(%esp), %ecx
+ movl %ecx, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %ecx # 4-byte Reload
+ adcl 396(%esp), %ecx
+ movl %ecx, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 400(%esp), %ecx
+ movl %ecx, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 404(%esp), %ecx
+ movl %ecx, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 408(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 412(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 416(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 420(%esp), %ecx
+ movl %ecx, 140(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 424(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ adcl $0, 120(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %eax, %ebp
+ imull %edi, %eax
+ movl %eax, (%esp)
+ leal 304(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 304(%esp), %ebp
+ movl 132(%esp), %edi # 4-byte Reload
+ adcl 308(%esp), %edi
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ adcl 316(%esp), %ebp
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %esi # 4-byte Reload
+ adcl 332(%esp), %esi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %edi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 240(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 240(%esp), %edi
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 244(%esp), %ecx
+ adcl 248(%esp), %ebp
+ movl %ebp, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ adcl 264(%esp), %esi
+ movl %esi, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %edi # 4-byte Reload
+ adcl 268(%esp), %edi
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 124(%esp), %ebp # 4-byte Reload
+ adcl 280(%esp), %ebp
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 176(%esp), %ecx
+ movl 1096(%esp), %edx
+ movl 100(%esp), %ebx # 4-byte Reload
+ calll .LmulPv448x32
+ addl 176(%esp), %esi
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %ebx # 4-byte Reload
+ adcl 188(%esp), %ebx
+ movl %ebx, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 136(%esp), %edx # 4-byte Reload
+ adcl 196(%esp), %edx
+ movl %edx, 136(%esp) # 4-byte Spill
+ movl %edi, %eax
+ adcl 200(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl 212(%esp), %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 232(%esp), %ecx
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl 172(%esp), %edi # 4-byte Reload
+ subl 16(%esp), %edi # 4-byte Folded Reload
+ movl 160(%esp), %ebp # 4-byte Reload
+ sbbl 8(%esp), %ebp # 4-byte Folded Reload
+ sbbl 12(%esp), %ebx # 4-byte Folded Reload
+ movl 168(%esp), %eax # 4-byte Reload
+ sbbl 20(%esp), %eax # 4-byte Folded Reload
+ sbbl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 148(%esp), %edx # 4-byte Reload
+ sbbl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 156(%esp), %edx # 4-byte Reload
+ sbbl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 152(%esp), %edx # 4-byte Reload
+ sbbl 36(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 124(%esp), %edx # 4-byte Reload
+ sbbl 40(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 140(%esp), %edx # 4-byte Reload
+ sbbl 44(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 128(%esp), %edx # 4-byte Reload
+ sbbl 48(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %edx # 4-byte Reload
+ sbbl 52(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %edx # 4-byte Reload
+ sbbl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 132(%esp) # 4-byte Spill
+ movl %ecx, %edx
+ sbbl 60(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 144(%esp) # 4-byte Spill
+ sbbl $0, %esi
+ andl $1, %esi
+ jne .LBB212_2
+# BB#1:
+ movl %eax, 168(%esp) # 4-byte Spill
+.LBB212_2:
+ movl %esi, %edx
+ testb %dl, %dl
+ movl 172(%esp), %eax # 4-byte Reload
+ jne .LBB212_4
+# BB#3:
+ movl %edi, %eax
+.LBB212_4:
+ movl 1088(%esp), %edi
+ movl %eax, (%edi)
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ movl 160(%esp), %ecx # 4-byte Reload
+ jne .LBB212_6
+# BB#5:
+ movl %ebp, %ecx
+.LBB212_6:
+ movl %ecx, 4(%edi)
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl 164(%esp), %ebp # 4-byte Reload
+ jne .LBB212_8
+# BB#7:
+ movl %ebx, %ebp
+.LBB212_8:
+ movl %ebp, 8(%edi)
+ movl 168(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%edi)
+ movl 124(%esp), %ebp # 4-byte Reload
+ movl 136(%esp), %ebx # 4-byte Reload
+ jne .LBB212_10
+# BB#9:
+ movl 80(%esp), %ebx # 4-byte Reload
+.LBB212_10:
+ movl %ebx, 16(%edi)
+ movl 140(%esp), %ebx # 4-byte Reload
+ movl 148(%esp), %esi # 4-byte Reload
+ jne .LBB212_12
+# BB#11:
+ movl 84(%esp), %esi # 4-byte Reload
+.LBB212_12:
+ movl %esi, 20(%edi)
+ movl 128(%esp), %esi # 4-byte Reload
+ jne .LBB212_14
+# BB#13:
+ movl 88(%esp), %eax # 4-byte Reload
+.LBB212_14:
+ movl %eax, 24(%edi)
+ movl 120(%esp), %edx # 4-byte Reload
+ jne .LBB212_16
+# BB#15:
+ movl 92(%esp), %eax # 4-byte Reload
+ movl %eax, 152(%esp) # 4-byte Spill
+.LBB212_16:
+ movl 152(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%edi)
+ jne .LBB212_18
+# BB#17:
+ movl 96(%esp), %ebp # 4-byte Reload
+.LBB212_18:
+ movl %ebp, 32(%edi)
+ jne .LBB212_20
+# BB#19:
+ movl 100(%esp), %ebx # 4-byte Reload
+.LBB212_20:
+ movl %ebx, 36(%edi)
+ jne .LBB212_22
+# BB#21:
+ movl 112(%esp), %esi # 4-byte Reload
+.LBB212_22:
+ movl %esi, 40(%edi)
+ jne .LBB212_24
+# BB#23:
+ movl 116(%esp), %edx # 4-byte Reload
+.LBB212_24:
+ movl %edx, 44(%edi)
+ jne .LBB212_26
+# BB#25:
+ movl 132(%esp), %ecx # 4-byte Reload
+.LBB212_26:
+ movl %ecx, 48(%edi)
+ movl 104(%esp), %eax # 4-byte Reload
+ jne .LBB212_28
+# BB#27:
+ movl 144(%esp), %eax # 4-byte Reload
+.LBB212_28:
+ movl %eax, 52(%edi)
+ addl $1068, %esp # imm = 0x42C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end212:
+ .size mcl_fp_montRed14L, .Lfunc_end212-mcl_fp_montRed14L
+
+# ---------------------------------------------------------------------
+# mcl_fp_addPre14L - raw (non-modular) 448-bit addition over
+# 14 x 32-bit limbs.  i386, AT&T syntax, cdecl (all args on stack).
+# After the three register pushes below:
+#   16(%esp) = arg0: destination limb array (written 0..52)
+#   20(%esp) = arg1: first source limb array
+#   24(%esp) = arg2: second source limb array
+# A single addl/adcl chain propagates the carry across all limbs;
+# the final carry (0 or 1) is returned in %eax.
+# NOTE(review): limb order presumably little-endian (limb 0 at
+# offset 0), per the other mcl routines in this file - confirm.
+# ---------------------------------------------------------------------
+ .globl mcl_fp_addPre14L
+ .align 16, 0x90
+ .type mcl_fp_addPre14L,@function
+mcl_fp_addPre14L: # @mcl_fp_addPre14L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax # %eax = second source operand
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx # %ecx = first source operand
+ addl (%ecx), %edx # limb 0: plain add starts the carry chain
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %edi
+ adcl 8(%ecx), %edi
+ movl 16(%esp), %ebx # %ebx = destination limb array
+ movl %edx, (%ebx)
+# Loads/stores are interleaved with the adcl chain; none of the
+# movl instructions touch CF, so the carry stays live throughout.
+ movl 12(%ecx), %edx
+ movl %esi, 4(%ebx)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %edi, 8(%ebx)
+ movl 20(%eax), %edi
+ movl %edx, 12(%ebx)
+ movl 20(%ecx), %edx
+ adcl %edi, %edx
+ movl 24(%eax), %edi
+ movl %esi, 16(%ebx)
+ movl 24(%ecx), %esi
+ adcl %edi, %esi
+ movl 28(%eax), %edi
+ movl %edx, 20(%ebx)
+ movl 28(%ecx), %edx
+ adcl %edi, %edx
+ movl 32(%eax), %edi
+ movl %esi, 24(%ebx)
+ movl 32(%ecx), %esi
+ adcl %edi, %esi
+ movl 36(%eax), %edi
+ movl %edx, 28(%ebx)
+ movl 36(%ecx), %edx
+ adcl %edi, %edx
+ movl 40(%eax), %edi
+ movl %esi, 32(%ebx)
+ movl 40(%ecx), %esi
+ adcl %edi, %esi
+ movl 44(%eax), %edi
+ movl %edx, 36(%ebx)
+ movl 44(%ecx), %edx
+ adcl %edi, %edx
+ movl 48(%eax), %edi
+ movl %esi, 40(%ebx)
+ movl 48(%ecx), %esi
+ adcl %edi, %esi
+ movl %edx, 44(%ebx)
+ movl %esi, 48(%ebx)
+ movl 52(%eax), %eax
+ movl 52(%ecx), %ecx
+ adcl %eax, %ecx # last limb; CF now holds the overall carry
+ movl %ecx, 52(%ebx)
+ sbbl %eax, %eax # %eax = CF ? -1 : 0
+ andl $1, %eax # return carry-out as 0/1
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end213:
+ .size mcl_fp_addPre14L, .Lfunc_end213-mcl_fp_addPre14L
+
+# ---------------------------------------------------------------------
+# mcl_fp_subPre14L - raw (non-modular) 448-bit subtraction over
+# 14 x 32-bit limbs.  i386, AT&T syntax, cdecl (all args on stack).
+# After the four register pushes below:
+#   20(%esp) = arg0: destination limb array (written 0..52)
+#   24(%esp) = arg1: minuend limb array
+#   28(%esp) = arg2: subtrahend limb array
+# A subl/sbbl chain propagates the borrow across all limbs; the
+# final borrow (0 or 1) is returned in %eax (pre-zeroed by xorl,
+# then extracted with sbbl $0 / andl $1 at the end).
+# ---------------------------------------------------------------------
+ .globl mcl_fp_subPre14L
+ .align 16, 0x90
+ .type mcl_fp_subPre14L,@function
+mcl_fp_subPre14L: # @mcl_fp_subPre14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx # %ecx = minuend
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax # %eax = 0; does not disturb the later chain
+ movl 28(%esp), %edx # %edx = subtrahend
+ subl (%edx), %esi # limb 0: plain sub starts the borrow chain
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebx
+ sbbl 8(%edx), %ebx
+ movl 20(%esp), %ebp # %ebp = destination limb array
+ movl %esi, (%ebp)
+# movl instructions between the sbbl steps do not modify CF, so the
+# borrow remains live across the interleaved loads and stores.
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebp)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ebp)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ebp)
+ movl 20(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ebp)
+ movl 24(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ebp)
+ movl 28(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ebp)
+ movl 32(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 36(%edx), %ebx
+ movl %esi, 28(%ebp)
+ movl 36(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 40(%edx), %ebx
+ movl %edi, 32(%ebp)
+ movl 40(%ecx), %edi
+ sbbl %ebx, %edi
+ movl 44(%edx), %ebx
+ movl %esi, 36(%ebp)
+ movl 44(%ecx), %esi
+ sbbl %ebx, %esi
+ movl 48(%edx), %ebx
+ movl %edi, 40(%ebp)
+ movl 48(%ecx), %edi
+ sbbl %ebx, %edi
+ movl %esi, 44(%ebp)
+ movl %edi, 48(%ebp)
+ movl 52(%edx), %edx
+ movl 52(%ecx), %ecx
+ sbbl %edx, %ecx # last limb; CF now holds the overall borrow
+ movl %ecx, 52(%ebp)
+ sbbl $0, %eax # %eax = 0 - borrow = 0 or -1
+ andl $1, %eax # return borrow-out as 0/1
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end214:
+ .size mcl_fp_subPre14L, .Lfunc_end214-mcl_fp_subPre14L
+
+# ---------------------------------------------------------------------
+# mcl_fp_shr1_14L - logical right shift by one bit of a 448-bit
+# (14 x 32-bit limb) value.  i386, AT&T syntax, cdecl.
+# After the single push below:
+#    8(%esp) = arg0: destination limb array (written 0..52)
+#   12(%esp) = arg1: source limb array
+# Each limb i is produced with shrdl $1, limb[i+1], limb[i], which
+# shifts limb[i] right by one and feeds in the low bit of limb[i+1];
+# the top limb is finished with a plain shrl (zero fill).
+# ---------------------------------------------------------------------
+ .globl mcl_fp_shr1_14L
+ .align 16, 0x90
+ .type mcl_fp_shr1_14L,@function
+mcl_fp_shr1_14L: # @mcl_fp_shr1_14L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax # %eax = source limb array
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx # dst[0] = (src[1]:src[0]) >> 1
+ movl 8(%esp), %ecx # %ecx = destination limb array
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 40(%ecx)
+ movl 48(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 44(%ecx)
+ movl 52(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 48(%ecx)
+ shrl %eax # top limb: shift in a zero bit
+ movl %eax, 52(%ecx)
+ popl %esi
+ retl
+.Lfunc_end215:
+ .size mcl_fp_shr1_14L, .Lfunc_end215-mcl_fp_shr1_14L
+
+# ---------------------------------------------------------------------
+# mcl_fp_add14L - modular addition over 14 x 32-bit limbs (448-bit).
+# i386, AT&T syntax, cdecl.  After the 4 pushes and subl $44:
+#   64(%esp) = arg0: destination limb array
+#   68(%esp) = arg1: first addend
+#   72(%esp) = arg2: second addend
+#   76(%esp) = arg3: modulus limb array
+# Strategy visible below: (1) compute the full sum with an adcl
+# chain, spilling limbs to the local stack frame, and store the raw
+# sum to the destination; (2) capture the carry-out in %ecx;
+# (3) subtract the modulus from the sum with a sbbl chain;
+# (4) if that subtraction did not underflow (the %nocarry path),
+# overwrite the destination with the reduced sum, otherwise leave
+# the raw sum in place.
+# ---------------------------------------------------------------------
+ .globl mcl_fp_add14L
+ .align 16, 0x90
+ .type mcl_fp_add14L,@function
+mcl_fp_add14L: # @mcl_fp_add14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $44, %esp
+ movl 72(%esp), %eax # %eax = second addend
+ movl (%eax), %edx
+ movl 4(%eax), %ecx
+ movl 68(%esp), %ebp # %ebp = first addend
+ addl (%ebp), %edx # limb 0 starts the carry chain
+ movl %edx, 4(%esp) # 4-byte Spill
+ adcl 4(%ebp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 8(%eax), %ecx
+ adcl 8(%ebp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 12(%ebp), %edx
+ movl 16(%ebp), %ecx
+ adcl 12(%eax), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ adcl 16(%eax), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 20(%ebp), %ecx
+ adcl 20(%eax), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 24(%ebp), %ecx
+ adcl 24(%eax), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 28(%ebp), %ecx
+ adcl 28(%eax), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 32(%ebp), %ecx
+ adcl 32(%eax), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 36(%ebp), %ecx
+ adcl 36(%eax), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 40(%ebp), %edx
+ adcl 40(%eax), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ movl 44(%ebp), %ebx
+ adcl 44(%eax), %ebx
+ movl 48(%ebp), %esi
+ adcl 48(%eax), %esi
+ movl 52(%ebp), %edi
+ adcl 52(%eax), %edi
+# Store the raw (possibly >= modulus) sum to the destination first.
+ movl 64(%esp), %eax # %eax = destination limb array
+ movl 4(%esp), %ebp # 4-byte Reload
+ movl %ebp, (%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 16(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 20(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl %edx, 40(%eax)
+ movl %ebx, 44(%eax)
+ movl %esi, 48(%eax)
+ movl %edi, 52(%eax)
+ sbbl %ecx, %ecx # %ecx = -(carry out of the addition)
+ andl $1, %ecx
+# Trial subtraction of the modulus from the sum (borrow chain).
+ movl 76(%esp), %edx # %edx = modulus limb array
+ subl (%edx), %ebp
+ movl %ebp, 4(%esp) # 4-byte Spill
+ movl 40(%esp), %ebp # 4-byte Reload
+ sbbl 4(%edx), %ebp
+ movl %ebp, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %ebp # 4-byte Reload
+ sbbl 8(%edx), %ebp
+ movl %ebp, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %ebp # 4-byte Reload
+ sbbl 12(%edx), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %ebp # 4-byte Reload
+ sbbl 16(%edx), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %ebp # 4-byte Reload
+ sbbl 20(%edx), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %ebp # 4-byte Reload
+ sbbl 24(%edx), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %ebp # 4-byte Reload
+ sbbl 28(%edx), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %ebp # 4-byte Reload
+ sbbl 32(%edx), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %ebp # 4-byte Reload
+ sbbl 36(%edx), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl (%esp), %ebp # 4-byte Reload
+ sbbl 40(%edx), %ebp
+ sbbl 44(%edx), %ebx
+ sbbl 48(%edx), %esi
+ sbbl 52(%edx), %edi
+ sbbl $0, %ecx # fold the addition carry into the borrow
+ testb $1, %cl
+ jne .LBB216_2 # borrow => sum < modulus: keep raw sum
+# BB#1: # %nocarry
+# No underflow: the reduced value (sum - modulus) is the result.
+ movl 4(%esp), %ecx # 4-byte Reload
+ movl %ecx, (%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 16(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 20(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl %ebp, 40(%eax)
+ movl %ebx, 44(%eax)
+ movl %esi, 48(%eax)
+ movl %edi, 52(%eax)
+.LBB216_2: # %carry
+ addl $44, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end216:
+ .size mcl_fp_add14L, .Lfunc_end216-mcl_fp_add14L
+
+# ---------------------------------------------------------------------
+# mcl_fp_addNF14L - modular addition over 14 x 32-bit limbs, branch-
+# select variant.  i386, AT&T syntax, cdecl.  After the 4 pushes and
+# subl $112:
+#   132(%esp) = arg0: destination limb array
+#   136(%esp) = arg1: first addend
+#   140(%esp) = arg2: second addend
+#   144(%esp) = arg3: modulus limb array
+# Computes sum = a + b (adcl chain), then diff = sum - modulus (sbbl
+# chain), and finally selects per limb: the sign of the top word of
+# diff (sarl $31 / js) decides whether diff went negative; if it did
+# (sum < modulus) the raw sum is stored, otherwise the reduced diff.
+# NOTE(review): the "NF" suffix presumably means "no final carry"
+# (inputs assumed < modulus so the sum cannot carry out of 448
+# bits) - confirm against the mcl sources.
+# ---------------------------------------------------------------------
+ .globl mcl_fp_addNF14L
+ .align 16, 0x90
+ .type mcl_fp_addNF14L,@function
+mcl_fp_addNF14L: # @mcl_fp_addNF14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $112, %esp
+ movl 140(%esp), %eax # %eax = second addend
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 136(%esp), %ecx # %ecx = first addend
+ addl (%ecx), %edx # limb 0 starts the carry chain
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl 4(%ecx), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+# Pre-load the high limbs of the second addend into spill slots.
+ movl 52(%eax), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 48(%eax), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 44(%eax), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 40(%eax), %ebp
+ movl 36(%eax), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 32(%eax), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 28(%eax), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 24(%eax), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 20(%eax), %ebx
+ movl 16(%eax), %edi
+ movl 12(%eax), %esi
+ movl 8(%eax), %edx
+ adcl 8(%ecx), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl 12(%ecx), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ adcl 16(%ecx), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ adcl 20(%ecx), %ebx
+ movl %ebx, 68(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 24(%ecx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 28(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 32(%ecx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 36(%ecx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 40(%ecx), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 44(%ecx), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 48(%ecx), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 52(%ecx), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+# Trial subtraction: diff = sum - modulus, limbs spilled to 0..52(%esp).
+ movl 144(%esp), %ecx # %ecx = modulus limb array
+ movl 72(%esp), %eax # 4-byte Reload
+ subl (%ecx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ sbbl 4(%ecx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 8(%ecx), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ sbbl 12(%ecx), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ sbbl 16(%ecx), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ sbbl 20(%ecx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ movl %edx, %eax
+ sbbl 24(%ecx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 28(%ecx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ sbbl 32(%ecx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ sbbl 36(%ecx), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ sbbl 40(%ecx), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ movl %eax, %esi
+ movl %eax, %ebp
+ sbbl 44(%ecx), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, %esi
+ sbbl 48(%ecx), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ movl %eax, %edi
+ sbbl 52(%ecx), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+# Sign of the top diff word: negative => sum < modulus => keep sum.
+ movl %edi, %ecx
+ sarl $31, %ecx
+ testl %ecx, %ecx
+ movl 72(%esp), %ecx # 4-byte Reload
+ js .LBB217_2
+# BB#1:
+ movl (%esp), %ecx # 4-byte Reload
+.LBB217_2:
+ movl 132(%esp), %edi # %edi = destination limb array
+ movl %ecx, (%edi)
+ movl 76(%esp), %eax # 4-byte Reload
+ js .LBB217_4
+# BB#3:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB217_4:
+ movl %eax, 4(%edi)
+# The remaining limbs are selected the same way: each js branch
+# keeps the raw-sum limb, each fall-through picks the reduced one.
+ movl %edx, %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ movl 56(%esp), %edx # 4-byte Reload
+ js .LBB217_6
+# BB#5:
+ movl 8(%esp), %edx # 4-byte Reload
+.LBB217_6:
+ movl %edx, 8(%edi)
+ movl %ebp, %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ movl 60(%esp), %ebp # 4-byte Reload
+ js .LBB217_8
+# BB#7:
+ movl 12(%esp), %ebp # 4-byte Reload
+.LBB217_8:
+ movl %ebp, 12(%edi)
+ movl 100(%esp), %ebp # 4-byte Reload
+ js .LBB217_10
+# BB#9:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB217_10:
+ movl %eax, 16(%edi)
+ movl 80(%esp), %esi # 4-byte Reload
+ js .LBB217_12
+# BB#11:
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+.LBB217_12:
+ movl 68(%esp), %eax # 4-byte Reload
+ movl %eax, 20(%edi)
+ js .LBB217_14
+# BB#13:
+ movl 24(%esp), %ecx # 4-byte Reload
+.LBB217_14:
+ movl %ecx, 24(%edi)
+ js .LBB217_16
+# BB#15:
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+.LBB217_16:
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%edi)
+ js .LBB217_18
+# BB#17:
+ movl 32(%esp), %ebp # 4-byte Reload
+.LBB217_18:
+ movl %ebp, 32(%edi)
+ js .LBB217_20
+# BB#19:
+ movl 36(%esp), %ebx # 4-byte Reload
+.LBB217_20:
+ movl %ebx, 36(%edi)
+ js .LBB217_22
+# BB#21:
+ movl 40(%esp), %esi # 4-byte Reload
+.LBB217_22:
+ movl %esi, 40(%edi)
+ movl 96(%esp), %eax # 4-byte Reload
+ js .LBB217_24
+# BB#23:
+ movl 44(%esp), %edx # 4-byte Reload
+.LBB217_24:
+ movl %edx, 44(%edi)
+ movl 92(%esp), %ecx # 4-byte Reload
+ js .LBB217_26
+# BB#25:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB217_26:
+ movl %eax, 48(%edi)
+ js .LBB217_28
+# BB#27:
+ movl 52(%esp), %ecx # 4-byte Reload
+.LBB217_28:
+ movl %ecx, 52(%edi)
+ addl $112, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end217:
+ .size mcl_fp_addNF14L, .Lfunc_end217-mcl_fp_addNF14L
+
+# ---------------------------------------------------------------------
+# mcl_fp_sub14L - modular subtraction over 14 x 32-bit limbs
+# (448-bit).  i386, AT&T syntax, cdecl.  After the 4 pushes and
+# subl $52:
+#   72(%esp) = arg0: destination limb array
+#   76(%esp) = arg1: minuend
+#   80(%esp) = arg2: subtrahend
+#   84(%esp) = arg3: modulus limb array
+# Strategy visible below: compute diff = a - b (sbbl chain), store
+# it to the destination, then - only if the subtraction borrowed
+# (a < b, the %carry path) - add the modulus back in place.
+# ---------------------------------------------------------------------
+ .globl mcl_fp_sub14L
+ .align 16, 0x90
+ .type mcl_fp_sub14L,@function
+mcl_fp_sub14L: # @mcl_fp_sub14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $52, %esp
+ movl 76(%esp), %esi # %esi = minuend
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx # %ebx will receive the borrow flag
+ movl 80(%esp), %edi # %edi = subtrahend
+ subl (%edi), %eax # limb 0 starts the borrow chain
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 32(%esi), %eax
+ sbbl 32(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 36(%esi), %edx
+ sbbl 36(%edi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 40(%esi), %ecx
+ sbbl 40(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 44(%esi), %eax
+ sbbl 44(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 48(%esi), %ebp
+ sbbl 48(%edi), %ebp
+ movl 52(%esi), %esi
+ sbbl 52(%edi), %esi
+ sbbl $0, %ebx # %ebx = -(final borrow)
+ testb $1, %bl
+# Store the raw difference to the destination unconditionally.
+ movl 72(%esp), %ebx # %ebx = destination limb array
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%ebx)
+ movl %edx, 36(%ebx)
+ movl %ecx, 40(%ebx)
+ movl %eax, 44(%ebx)
+ movl %ebp, 48(%ebx)
+ movl %esi, 52(%ebx)
+ je .LBB218_2 # no borrow => already reduced, done
+# BB#1: # %carry
+# Borrow occurred (a < b): add the modulus back onto the stored
+# difference, limb by limb, with an addl/adcl chain.
+ movl %esi, (%esp) # 4-byte Spill
+ movl 84(%esp), %esi # %esi = modulus limb array
+ movl 44(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ adcl 8(%esi), %edi
+ movl 12(%esi), %eax
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl 40(%esi), %ecx
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ movl 44(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%ebx)
+ movl %eax, 44(%ebx)
+ movl 48(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 48(%ebx)
+ movl 52(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ebx)
+.LBB218_2: # %nocarry
+ addl $52, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end218:
+ .size mcl_fp_sub14L, .Lfunc_end218-mcl_fp_sub14L
+
+# ---------------------------------------------------------------------------
+# mcl_fp_subNF14L(z, x, y, p) -- 14-limb (14 x 32-bit, 448-bit) modular
+# subtraction, "NF" (no final borrow branch) variant: branch-free select.
+# cdecl; after the 4 pushes and subl $88 the stack args sit at:
+#   z = 108(%esp), x = 112(%esp), y = 116(%esp), p = 120(%esp)
+# Computes t = x - y with a subl/sbbl borrow chain, derives a 0 / -1 mask
+# from the sign of the top result limb, then stores z = t + (p & mask),
+# i.e. p is added back exactly when the top limb went negative
+# (NOTE(review): correctness presumes inputs are reduced mod p -- confirm
+# against the mcl C sources).
+# Clobbers: eax, ecx, edx; ebx/esi/edi/ebp saved and restored.
+# ---------------------------------------------------------------------------
+ .globl mcl_fp_subNF14L
+ .align 16, 0x90
+ .type mcl_fp_subNF14L,@function
+mcl_fp_subNF14L: # @mcl_fp_subNF14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $88, %esp
+ movl 112(%esp), %ecx
+ movl 52(%ecx), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+# subtract y from x limb by limb; the subl/sbbl chain must not be broken,
+# results are spilled to the local frame as they are produced
+ movl (%ecx), %edx
+ movl 4(%ecx), %eax
+ movl 116(%esp), %edi
+ subl (%edi), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ sbbl 4(%edi), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%ecx), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 32(%ecx), %ebp
+ movl 28(%ecx), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 24(%ecx), %ebx
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 28(%edi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ sbbl 32(%edi), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 48(%edi), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 52(%edi), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+# build the 0 / -1 select mask: %esi = top limb >> 31 (arithmetic)
+ movl %eax, %esi
+ sarl $31, %esi
+# the addl/adcl/shrl/orl sequence below reconstructs that same 0 / -1 mask
+# in %eax and %ebp (verify: esi=0 gives 0|0=0 and 0+0+0=0; esi=-1 gives
+# (-2)|1=-1 and -1+-1+CF=-1), freeing %esi-independent registers for limbs 0/1
+ movl %esi, %ecx
+ addl %ecx, %ecx
+ movl %esi, %ebp
+ adcl %ebp, %ebp
+ shrl $31, %eax
+ orl %ecx, %eax
+# mask each limb of p with the 0 / -1 mask; masked limbs are what gets
+# added back to the raw difference
+ movl 120(%esp), %edi
+ andl 4(%edi), %ebp
+ andl (%edi), %eax
+ movl 52(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 48(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 44(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 40(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 36(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 32(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 28(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 24(%edi), %ecx
+ andl %esi, %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 20(%edi), %ebx
+ andl %esi, %ebx
+ movl 16(%edi), %edx
+ andl %esi, %edx
+ movl 12(%edi), %ecx
+ andl %esi, %ecx
+ andl 8(%edi), %esi
+# final add-back carry chain: z[i] = (x-y)[i] + (p&mask)[i], stored to z
+# as each limb is produced (stores interleave with the adcl chain)
+ addl 56(%esp), %eax # 4-byte Folded Reload
+ adcl 60(%esp), %ebp # 4-byte Folded Reload
+ movl 108(%esp), %edi
+ movl %eax, (%edi)
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %ebp, 4(%edi)
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %esi, 8(%edi)
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 12(%edi)
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %edx, 16(%edi)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %ebx, 20(%edi)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 24(%edi)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 28(%edi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 32(%edi)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 36(%edi)
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 40(%edi)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 44(%edi)
+ movl %eax, 48(%edi)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%edi)
+ addl $88, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end219:
+ .size mcl_fp_subNF14L, .Lfunc_end219-mcl_fp_subNF14L
+
+# ---------------------------------------------------------------------------
+# mcl_fpDbl_add14L(z, x, y, p) -- double-width (28 x 32-bit limb) addition
+# with conditional reduction of the upper half.
+# cdecl; after the 4 pushes and subl $100 the stack args sit at:
+#   z = 120(%esp), x = 124(%esp), y = 128(%esp), p = 132(%esp)
+# Limbs 0..13 of z = x + y are stored directly as the carry chain runs.
+# Limbs 14..27 are reduced: t = high(x+y) - p is trial-computed with a
+# borrow chain; the spilled t is stored when the trial subtraction did not
+# underflow (tracked in %edx), otherwise the unreduced sum is stored.
+# ---------------------------------------------------------------------------
+ .globl mcl_fpDbl_add14L
+ .align 16, 0x90
+ .type mcl_fpDbl_add14L,@function
+mcl_fpDbl_add14L: # @mcl_fpDbl_add14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+ movl 128(%esp), %ecx
+ movl 124(%esp), %esi
+# low half: single addl then an unbroken adcl chain; stores to z are
+# interleaved with the next limb's loads (flags preserved by mov)
+ movl 12(%esi), %edi
+ movl 16(%esi), %edx
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%esi), %ebp
+ movl 120(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%esi), %ebp
+ adcl 8(%esi), %ebx
+ adcl 12(%ecx), %edi
+ adcl 16(%ecx), %edx
+ movl %ebp, 4(%eax)
+ movl 64(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %edi, 12(%eax)
+ movl 20(%esi), %edi
+ adcl %ebx, %edi
+ movl 24(%ecx), %ebx
+ movl %edx, 16(%eax)
+ movl 24(%esi), %edx
+ adcl %ebx, %edx
+ movl 28(%ecx), %ebx
+ movl %edi, 20(%eax)
+ movl 28(%esi), %edi
+ adcl %ebx, %edi
+ movl 32(%ecx), %ebx
+ movl %edx, 24(%eax)
+ movl 32(%esi), %edx
+ adcl %ebx, %edx
+ movl 36(%ecx), %ebx
+ movl %edi, 28(%eax)
+ movl 36(%esi), %edi
+ adcl %ebx, %edi
+ movl 40(%ecx), %ebx
+ movl %edx, 32(%eax)
+ movl 40(%esi), %edx
+ adcl %ebx, %edx
+ movl 44(%ecx), %ebx
+ movl %edi, 36(%eax)
+ movl 44(%esi), %edi
+ adcl %ebx, %edi
+ movl 48(%ecx), %ebx
+ movl %edx, 40(%eax)
+ movl 48(%esi), %edx
+ adcl %ebx, %edx
+ movl 52(%ecx), %ebx
+ movl %edi, 44(%eax)
+ movl 52(%esi), %edi
+ adcl %ebx, %edi
+# high half: limbs 14..27 are summed into spill slots / registers instead
+# of being stored, pending the trial reduction below
+ movl 56(%ecx), %ebx
+ movl %edx, 48(%eax)
+ movl 56(%esi), %edx
+ adcl %ebx, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 60(%ecx), %edx
+ movl %edi, 52(%eax)
+ movl 60(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 64(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%ecx), %edx
+ movl 68(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%ecx), %edx
+ movl 72(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 76(%ecx), %edx
+ movl 76(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%ecx), %edx
+ movl 80(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%ecx), %edx
+ movl 84(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 88(%ecx), %edx
+ movl 88(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 92(%ecx), %edx
+ movl 92(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 96(%ecx), %edx
+ movl 96(%esi), %eax
+ adcl %edx, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 100(%ecx), %edx
+ movl 100(%esi), %edi
+ adcl %edx, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 104(%ecx), %edx
+ movl 104(%esi), %ebx
+ adcl %edx, %ebx
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 108(%ecx), %ecx
+ movl 108(%esi), %esi
+ adcl %ecx, %esi
+# capture the final carry of the wide addition as 0/1 in %edx
+ sbbl %edx, %edx
+ andl $1, %edx
+# trial-subtract p from the high 14 limbs; results go to spill slots,
+# nothing is committed to z yet
+ movl 132(%esp), %ebp
+ movl 72(%esp), %ecx # 4-byte Reload
+ subl (%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ sbbl 4(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 8(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 20(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebp), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ sbbl 36(%ebp), %ecx
+ sbbl 40(%ebp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl 44(%ebp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ movl %esi, %ebx
+ sbbl 48(%ebp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ sbbl 52(%ebp), %esi
+# fold the trial subtraction's borrow into the saved carry:
+# %edx = 0 means the reduced value is in range and must be used
+ sbbl $0, %edx
+ andl $1, %edx
+ jne .LBB220_2
+# BB#1:
+ movl %esi, %ebx
+.LBB220_2:
+# select reduced vs. unreduced high limbs (flags from testb reused by all
+# the jne's below) and store limbs 14..27 of z
+ testb %dl, %dl
+ movl 72(%esp), %eax # 4-byte Reload
+ movl 68(%esp), %edx # 4-byte Reload
+ movl 64(%esp), %edi # 4-byte Reload
+ movl 60(%esp), %ebp # 4-byte Reload
+ jne .LBB220_4
+# BB#3:
+ movl %ecx, %edx
+ movl (%esp), %edi # 4-byte Reload
+ movl 4(%esp), %ebp # 4-byte Reload
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB220_4:
+ movl 120(%esp), %esi
+ movl %eax, 56(%esi)
+ movl 76(%esp), %eax # 4-byte Reload
+ movl %eax, 60(%esi)
+ movl 80(%esp), %eax # 4-byte Reload
+ movl %eax, 64(%esi)
+ movl 84(%esp), %eax # 4-byte Reload
+ movl %eax, 68(%esi)
+ movl 88(%esp), %eax # 4-byte Reload
+ movl %eax, 72(%esi)
+ movl 92(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esi)
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esi)
+ movl %ebp, 84(%esi)
+ movl %edi, 88(%esi)
+ movl %edx, 92(%esi)
+ movl 52(%esp), %edx # 4-byte Reload
+ movl 48(%esp), %eax # 4-byte Reload
+ jne .LBB220_6
+# BB#5:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB220_6:
+ movl %eax, 96(%esi)
+ movl 56(%esp), %ecx # 4-byte Reload
+ jne .LBB220_8
+# BB#7:
+ movl 40(%esp), %edx # 4-byte Reload
+.LBB220_8:
+ movl %edx, 100(%esi)
+ jne .LBB220_10
+# BB#9:
+ movl 44(%esp), %ecx # 4-byte Reload
+.LBB220_10:
+ movl %ecx, 104(%esi)
+ movl %ebx, 108(%esi)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end220:
+ .size mcl_fpDbl_add14L, .Lfunc_end220-mcl_fpDbl_add14L
+
+# ---------------------------------------------------------------------------
+# mcl_fpDbl_sub14L(z, x, y, p) -- double-width (28 x 32-bit limb)
+# subtraction with conditional add-back of p on the upper half.
+# cdecl; after the 4 pushes and subl $96 the stack args sit at:
+#   z = 116(%esp), x = 120(%esp), y = 124(%esp), p = 128(%esp)
+# Limbs 0..13 of z = x - y are stored directly as the borrow chain runs.
+# If the full 28-limb subtraction borrows out of the top, p is added to
+# the high half: each conditional block below selects p-limb or 0, and a
+# final adcl chain adds the selected limbs into limbs 14..27 of z.
+# ---------------------------------------------------------------------------
+ .globl mcl_fpDbl_sub14L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub14L,@function
+mcl_fpDbl_sub14L: # @mcl_fpDbl_sub14L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $96, %esp
+ movl 120(%esp), %ebx
+# low half: single subl then an unbroken sbbl borrow chain, with stores to
+# z interleaved (mov preserves flags)
+ movl (%ebx), %eax
+ movl 4(%ebx), %edx
+ movl 124(%esp), %ebp
+ subl (%ebp), %eax
+ sbbl 4(%ebp), %edx
+ movl 8(%ebx), %esi
+ sbbl 8(%ebp), %esi
+ movl 116(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 12(%ebx), %eax
+ sbbl 12(%ebp), %eax
+ movl %edx, 4(%ecx)
+ movl 16(%ebx), %edx
+ sbbl 16(%ebp), %edx
+ movl %esi, 8(%ecx)
+ movl 20(%ebp), %esi
+ movl %eax, 12(%ecx)
+ movl 20(%ebx), %eax
+ sbbl %esi, %eax
+ movl 24(%ebp), %esi
+ movl %edx, 16(%ecx)
+ movl 24(%ebx), %edx
+ sbbl %esi, %edx
+ movl 28(%ebp), %esi
+ movl %eax, 20(%ecx)
+ movl 28(%ebx), %eax
+ sbbl %esi, %eax
+ movl 32(%ebp), %esi
+ movl %edx, 24(%ecx)
+ movl 32(%ebx), %edx
+ sbbl %esi, %edx
+ movl 36(%ebp), %esi
+ movl %eax, 28(%ecx)
+ movl 36(%ebx), %eax
+ sbbl %esi, %eax
+ movl 40(%ebp), %esi
+ movl %edx, 32(%ecx)
+ movl 40(%ebx), %edx
+ sbbl %esi, %edx
+ movl 44(%ebp), %esi
+ movl %eax, 36(%ecx)
+ movl 44(%ebx), %eax
+ sbbl %esi, %eax
+ movl 48(%ebp), %esi
+ movl %edx, 40(%ecx)
+ movl 48(%ebx), %edx
+ sbbl %esi, %edx
+ movl 52(%ebp), %esi
+ movl %eax, 44(%ecx)
+ movl 52(%ebx), %eax
+ sbbl %esi, %eax
+# high half: limbs 14..27 of the raw difference are spilled, not stored,
+# pending the conditional add-back of p
+ movl 56(%ebp), %esi
+ movl %edx, 48(%ecx)
+ movl 56(%ebx), %edx
+ sbbl %esi, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 60(%ebp), %edx
+ movl %eax, 52(%ecx)
+ movl 60(%ebx), %eax
+ sbbl %edx, %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 64(%ebp), %eax
+ movl 64(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 68(%ebp), %eax
+ movl 68(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 72(%ebp), %eax
+ movl 72(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 76(%ebp), %eax
+ movl 76(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 80(%ebp), %eax
+ movl 80(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 84(%ebp), %eax
+ movl 84(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 88(%ebp), %eax
+ movl 88(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 92(%ebp), %eax
+ movl 92(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 96(%ebp), %eax
+ movl 96(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 100(%ebp), %eax
+ movl 100(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 104(%ebp), %eax
+ movl 104(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 108(%ebp), %eax
+ movl 108(%ebx), %edx
+ sbbl %eax, %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+# %eax = 1 iff the 28-limb subtraction produced a final borrow;
+# andl also sets ZF, which every jne below branches on
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+ movl 128(%esp), %ebp
+# fourteen select blocks: each picks p-limb (borrow) or 0 (no borrow);
+# note mov/jmp do not touch flags, so the single andl above drives them all
+ jne .LBB221_1
+# BB#2:
+ movl $0, 56(%esp) # 4-byte Folded Spill
+ jmp .LBB221_3
+.LBB221_1:
+ movl 52(%ebp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+.LBB221_3:
+ testb %al, %al
+ jne .LBB221_4
+# BB#5:
+ movl $0, 24(%esp) # 4-byte Folded Spill
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB221_6
+.LBB221_4:
+ movl (%ebp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 4(%ebp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+.LBB221_6:
+ jne .LBB221_7
+# BB#8:
+ movl $0, 32(%esp) # 4-byte Folded Spill
+ jmp .LBB221_9
+.LBB221_7:
+ movl 48(%ebp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+.LBB221_9:
+ jne .LBB221_10
+# BB#11:
+ movl $0, 28(%esp) # 4-byte Folded Spill
+ jmp .LBB221_12
+.LBB221_10:
+ movl 44(%ebp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+.LBB221_12:
+ jne .LBB221_13
+# BB#14:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB221_15
+.LBB221_13:
+ movl 40(%ebp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB221_15:
+ jne .LBB221_16
+# BB#17:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB221_18
+.LBB221_16:
+ movl 36(%ebp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB221_18:
+ jne .LBB221_19
+# BB#20:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB221_21
+.LBB221_19:
+ movl 32(%ebp), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB221_21:
+ jne .LBB221_22
+# BB#23:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB221_24
+.LBB221_22:
+ movl 28(%ebp), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB221_24:
+ jne .LBB221_25
+# BB#26:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB221_27
+.LBB221_25:
+ movl 24(%ebp), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB221_27:
+ jne .LBB221_28
+# BB#29:
+ movl $0, %esi
+ jmp .LBB221_30
+.LBB221_28:
+ movl 20(%ebp), %esi
+.LBB221_30:
+ jne .LBB221_31
+# BB#32:
+ movl $0, %edi
+ jmp .LBB221_33
+.LBB221_31:
+ movl 16(%ebp), %edi
+.LBB221_33:
+ jne .LBB221_34
+# BB#35:
+ movl $0, %ebx
+ jmp .LBB221_36
+.LBB221_34:
+ movl 12(%ebp), %ebx
+.LBB221_36:
+ jne .LBB221_37
+# BB#38:
+ xorl %ebp, %ebp
+ jmp .LBB221_39
+.LBB221_37:
+ movl 8(%ebp), %ebp
+.LBB221_39:
+# add the selected limbs (p or 0) to the raw high-half difference and
+# store limbs 14..27 of z
+ movl 20(%esp), %edx # 4-byte Reload
+ addl 44(%esp), %edx # 4-byte Folded Reload
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 56(%ecx)
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 60(%ecx)
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 64(%ecx)
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 68(%ecx)
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 72(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 76(%ecx)
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 68(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 80(%ecx)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 84(%ecx)
+ movl 12(%esp), %edx # 4-byte Reload
+ adcl 76(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 88(%ecx)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 92(%ecx)
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 84(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 96(%ecx)
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 100(%ecx)
+ movl %eax, 104(%ecx)
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%ecx)
+ addl $96, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end221:
+ .size mcl_fpDbl_sub14L, .Lfunc_end221-mcl_fpDbl_sub14L
+
+# ---------------------------------------------------------------------------
+# .LmulPv480x32 -- file-local helper with a NON-STANDARD calling convention:
+#   %ecx = dst (16 x 32-bit limbs written), %edx = src (15 limbs read),
+#   and the 32-bit multiplier is the caller's (%esp) slot, which lands at
+#   124(%esp) after this function's 4 pushes + subl $104.
+# Computes dst[0..15] = src[0..14] * scalar: one 32x32->64 mull per source
+# limb (highest limb first), lo/hi halves spilled, then a single adcl chain
+# sums dst[i] = lo[i] + hi[i-1] + carry. Returns dst in %eax.
+# ---------------------------------------------------------------------------
+ .align 16, 0x90
+ .type .LmulPv480x32,@function
+.LmulPv480x32: # @mulPv480x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $104, %esp
+ movl %edx, %ebp
+# %esi = scalar multiplier; mull clobbers %edx so src lives in %ebp
+ movl 124(%esp), %esi
+ movl %esi, %eax
+ mull 56(%ebp)
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 52(%ebp)
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 48(%ebp)
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 44(%ebp)
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 40(%ebp)
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 36(%ebp)
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 32(%ebp)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 28(%ebp)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 24(%ebp)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 20(%ebp)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 16(%ebp)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 12(%ebp)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 8(%ebp)
+ movl %edx, %edi
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull 4(%ebp)
+ movl %edx, %ebx
+ movl %eax, (%esp) # 4-byte Spill
+ movl %esi, %eax
+ mull (%ebp)
+# accumulate: dst[0] = lo[0]; then an unbroken adcl chain produces
+# dst[i] = lo[i] + hi[i-1] + carry; dst[15] is the final high word + carry
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 8(%ecx)
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ecx)
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%ecx)
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ecx)
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%ecx)
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 60(%ecx)
+ movl %ecx, %eax
+ addl $104, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end222:
+ .size .LmulPv480x32, .Lfunc_end222-.LmulPv480x32
+
+# ---------------------------------------------------------------------------
+# mcl_fp_mulUnitPre15L(z, x, y) -- z[0..15] = x[0..14] * y for a 32-bit
+# scalar y (15-limb by 1-limb multiply, 16-limb result).
+# cdecl; after the 4 pushes and subl $124 the stack args sit at:
+#   z = 144(%esp), x = 148(%esp), y = 152(%esp)
+# The call/pop pair loads %ebx with the GOT address (PIC boilerplate
+# emitted by the compiler). The scalar is passed to .LmulPv480x32 via the
+# (%esp) slot, dst in %ecx (local 64-byte buffer at 56(%esp)), src in
+# %edx; the 16 result limbs are then copied from the buffer to z.
+# ---------------------------------------------------------------------------
+ .globl mcl_fp_mulUnitPre15L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre15L,@function
+mcl_fp_mulUnitPre15L: # @mcl_fp_mulUnitPre15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $124, %esp
+ calll .L223$pb
+.L223$pb:
+ popl %ebx
+.Ltmp44:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp44-.L223$pb), %ebx
+ movl 152(%esp), %eax
+ movl %eax, (%esp)
+ leal 56(%esp), %ecx
+ movl 148(%esp), %edx
+ calll .LmulPv480x32
+# copy the 16-limb product from the local buffer at 56(%esp) out to z;
+# high limbs go through spill slots, low limbs through registers
+ movl 116(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 108(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 104(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp
+ movl 72(%esp), %ebx
+ movl 68(%esp), %edi
+ movl 64(%esp), %esi
+ movl 56(%esp), %edx
+ movl 60(%esp), %ecx
+ movl 144(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ addl $124, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end223:
+ .size mcl_fp_mulUnitPre15L, .Lfunc_end223-mcl_fp_mulUnitPre15L
+
+ .globl mcl_fpDbl_mulPre15L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre15L,@function
+mcl_fpDbl_mulPre15L: # @mcl_fpDbl_mulPre15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1084, %esp # imm = 0x43C
+ calll .L224$pb
+.L224$pb:
+ popl %esi
+.Ltmp45:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp45-.L224$pb), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 1112(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1016(%esp), %ecx
+ movl 1108(%esp), %edi
+ movl %edi, %edx
+ movl %esi, %ebx
+ calll .LmulPv480x32
+ movl 1076(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1072(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1068(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1060(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1056(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1052(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1044(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1040(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1036(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1032(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1028(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 1024(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1016(%esp), %eax
+ movl 1020(%esp), %ebp
+ movl 1104(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 1112(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 952(%esp), %ecx
+ movl %edi, %edx
+ movl %esi, %ebx
+ calll .LmulPv480x32
+ addl 952(%esp), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 1012(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1008(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1004(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1000(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 996(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 992(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 988(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 984(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 980(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 972(%esp), %edi
+ movl 968(%esp), %esi
+ movl 964(%esp), %edx
+ movl 956(%esp), %eax
+ movl 960(%esp), %ecx
+ movl 1104(%esp), %ebp
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 72(%esp), %eax # 4-byte Reload
+ addl 888(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 948(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 944(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 940(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 936(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 932(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 928(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 924(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 920(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 916(%esp), %ebx
+ movl 912(%esp), %edi
+ movl 908(%esp), %esi
+ movl 904(%esp), %edx
+ movl 900(%esp), %ecx
+ movl 892(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 896(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 72(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 112(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 824(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 824(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 884(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 880(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 876(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 872(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 868(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 864(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 860(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 856(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 852(%esp), %ebx
+ movl 848(%esp), %edi
+ movl 844(%esp), %esi
+ movl 840(%esp), %edx
+ movl 836(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 760(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 804(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 800(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 796(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 792(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 788(%esp), %ebx
+ movl 784(%esp), %edi
+ movl 780(%esp), %esi
+ movl 776(%esp), %edx
+ movl 772(%esp), %ecx
+ movl 764(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 60(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 696(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 748(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 744(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 740(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 736(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 732(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 724(%esp), %ebx
+ movl 720(%esp), %edi
+ movl 716(%esp), %esi
+ movl 712(%esp), %edx
+ movl 708(%esp), %ecx
+ movl 700(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 704(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 632(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 692(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 688(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 684(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 680(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 676(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 672(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 660(%esp), %ebx
+ movl 656(%esp), %edi
+ movl 652(%esp), %esi
+ movl 648(%esp), %edx
+ movl 644(%esp), %ecx
+ movl 636(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 640(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 568(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 628(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 624(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 620(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 616(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 596(%esp), %ebx
+ movl 592(%esp), %edi
+ movl 588(%esp), %esi
+ movl 584(%esp), %edx
+ movl 580(%esp), %ecx
+ movl 572(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 576(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 504(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 564(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 560(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 532(%esp), %ebx
+ movl 528(%esp), %edi
+ movl 524(%esp), %esi
+ movl 520(%esp), %edx
+ movl 516(%esp), %ecx
+ movl 508(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 512(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 440(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 492(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 472(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 468(%esp), %ebx
+ movl 464(%esp), %edi
+ movl 460(%esp), %esi
+ movl 456(%esp), %edx
+ movl 452(%esp), %ecx
+ movl 444(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 448(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 376(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 436(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 432(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 428(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 424(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 404(%esp), %ebx
+ movl 400(%esp), %edi
+ movl 396(%esp), %esi
+ movl 392(%esp), %edx
+ movl 388(%esp), %ecx
+ movl 380(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 312(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 360(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 356(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 352(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 348(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 344(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 340(%esp), %ebx
+ movl 336(%esp), %edi
+ movl 332(%esp), %esi
+ movl 328(%esp), %edx
+ movl 324(%esp), %ecx
+ movl 316(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 44(%eax)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl %eax, 108(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 48(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 1108(%esp), %eax
+ movl %eax, %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 108(%esp), %eax # 4-byte Reload
+ addl 248(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 304(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 300(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 296(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 292(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 288(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 284(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 280(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 276(%esp), %ebx
+ movl 272(%esp), %edi
+ movl 268(%esp), %edx
+ movl 264(%esp), %ecx
+ movl 260(%esp), %eax
+ movl 252(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 256(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ movl 1104(%esp), %ebp
+ movl %esi, 48(%ebp)
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 20(%esp), %esi # 4-byte Reload
+ adcl %esi, 36(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 184(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 244(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 240(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 236(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 232(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 228(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 224(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 220(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 212(%esp), %ebx
+ movl 208(%esp), %edx
+ movl 204(%esp), %ecx
+ movl 200(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 196(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 188(%esp), %eax
+ movl 192(%esp), %esi
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl 1104(%esp), %edi
+ movl %ebp, 52(%edi)
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl %edi, 40(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 64(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1112(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 1108(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 120(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 124(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl 128(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 164(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 156(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 152(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp
+ movl 144(%esp), %edi
+ movl 140(%esp), %esi
+ movl 136(%esp), %edx
+ movl 132(%esp), %ecx
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebx # 4-byte Reload
+ movl %ebx, 56(%eax)
+ movl 32(%esp), %ebx # 4-byte Reload
+ movl %ebx, 60(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %ebx # 4-byte Reload
+ movl %ebx, 64(%eax)
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 72(%eax)
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 76(%eax)
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, 80(%eax)
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %ebp, 84(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 88(%eax)
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 92(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 104(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 96(%eax)
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 100(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 104(%eax)
+ movl %ecx, 108(%eax)
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 112(%eax)
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 116(%eax)
+ addl $1084, %esp # imm = 0x43C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end224:
+ .size mcl_fpDbl_mulPre15L, .Lfunc_end224-mcl_fpDbl_mulPre15L
+
+ .globl mcl_fpDbl_sqrPre15L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre15L,@function
+mcl_fpDbl_sqrPre15L: # @mcl_fpDbl_sqrPre15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1084, %esp # imm = 0x43C
+ calll .L225$pb
+.L225$pb:
+ popl %ebx
+.Ltmp46:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp46-.L225$pb), %ebx
+ movl %ebx, 116(%esp) # 4-byte Spill
+ movl 1108(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 1016(%esp), %ecx
+ movl %edx, %edi
+ movl %ebx, %esi
+ calll .LmulPv480x32
+ movl 1076(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1072(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1068(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1064(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1060(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1056(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1052(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1044(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1040(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1036(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1032(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1028(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 1024(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1016(%esp), %eax
+ movl 1020(%esp), %ebp
+ movl 1104(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edi, %edx
+ movl 4(%edx), %eax
+ movl %eax, (%esp)
+ leal 952(%esp), %ecx
+ movl %esi, %ebx
+ calll .LmulPv480x32
+ addl 952(%esp), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 1012(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1008(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1004(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1000(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 996(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 992(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 988(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 984(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 980(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 976(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 972(%esp), %edi
+ movl 968(%esp), %esi
+ movl 964(%esp), %edx
+ movl 956(%esp), %eax
+ movl 960(%esp), %ecx
+ movl 1104(%esp), %ebp
+ movl 16(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 72(%esp), %eax # 4-byte Reload
+ addl 888(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 948(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 944(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 940(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 936(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 932(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 928(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 924(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 920(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 916(%esp), %ebx
+ movl 912(%esp), %edi
+ movl 908(%esp), %esi
+ movl 904(%esp), %edx
+ movl 900(%esp), %ecx
+ movl 892(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 896(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 72(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 112(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 68(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 824(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 824(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 884(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 880(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 876(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 872(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 868(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 864(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 860(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 856(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 852(%esp), %ebx
+ movl 848(%esp), %edi
+ movl 844(%esp), %esi
+ movl 840(%esp), %edx
+ movl 836(%esp), %ecx
+ movl 828(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 760(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 804(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 800(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 796(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 792(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 788(%esp), %ebx
+ movl 784(%esp), %edi
+ movl 780(%esp), %esi
+ movl 776(%esp), %edx
+ movl 772(%esp), %ecx
+ movl 764(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 60(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 696(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 748(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 744(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 740(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 736(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 732(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 728(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 724(%esp), %ebx
+ movl 720(%esp), %edi
+ movl 716(%esp), %esi
+ movl 712(%esp), %edx
+ movl 708(%esp), %ecx
+ movl 700(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 704(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 632(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 692(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 688(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 684(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 680(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 676(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 672(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 660(%esp), %ebx
+ movl 656(%esp), %edi
+ movl 652(%esp), %esi
+ movl 648(%esp), %edx
+ movl 644(%esp), %ecx
+ movl 636(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 640(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 64(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 68(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 568(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 628(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 624(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 620(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 616(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 596(%esp), %ebx
+ movl 592(%esp), %edi
+ movl 588(%esp), %esi
+ movl 584(%esp), %edx
+ movl 580(%esp), %ecx
+ movl 572(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 576(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 68(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 504(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 564(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 560(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 532(%esp), %ebx
+ movl 528(%esp), %edi
+ movl 524(%esp), %esi
+ movl 520(%esp), %edx
+ movl 516(%esp), %ecx
+ movl 508(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 512(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 36(%edx), %eax
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 440(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 500(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 496(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 492(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 488(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 472(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 468(%esp), %ebx
+ movl 464(%esp), %edi
+ movl 460(%esp), %esi
+ movl 456(%esp), %edx
+ movl 452(%esp), %ecx
+ movl 444(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 448(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 36(%esp), %ebp # 4-byte Folded Reload
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 24(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 40(%edx), %eax
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 376(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 436(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 432(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 428(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 424(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 420(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 416(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 404(%esp), %ebx
+ movl 400(%esp), %edi
+ movl 396(%esp), %esi
+ movl 392(%esp), %edx
+ movl 388(%esp), %ecx
+ movl 380(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 56(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 12(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 44(%edx), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 312(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 372(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 368(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 364(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 360(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 356(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 352(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 348(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 344(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 340(%esp), %ebx
+ movl 336(%esp), %edi
+ movl 332(%esp), %esi
+ movl 328(%esp), %edx
+ movl 324(%esp), %ecx
+ movl 316(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 320(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %ebp, 44(%eax)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl %eax, 108(%esp) # 4-byte Folded Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 28(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 32(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 56(%esp) # 4-byte Folded Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, 48(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 48(%edx), %eax
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 108(%esp), %eax # 4-byte Reload
+ addl 248(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 304(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 300(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 296(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 292(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 288(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 284(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 280(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 276(%esp), %ebx
+ movl 272(%esp), %edi
+ movl 268(%esp), %edx
+ movl 264(%esp), %ecx
+ movl 260(%esp), %eax
+ movl 252(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 256(%esp), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ movl 1104(%esp), %ebp
+ movl %esi, 48(%ebp)
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 20(%esp), %esi # 4-byte Reload
+ adcl %esi, 36(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 12(%esp) # 4-byte Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 20(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 24(%esp) # 4-byte Spill
+ adcl 16(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 28(%esp) # 4-byte Spill
+ adcl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl %eax, 52(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 44(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 52(%edx), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 184(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 244(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 240(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 236(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 232(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 228(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 224(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 220(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 212(%esp), %ebx
+ movl 208(%esp), %edx
+ movl 204(%esp), %ecx
+ movl 200(%esp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 196(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 188(%esp), %eax
+ movl 192(%esp), %esi
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl 1104(%esp), %edi
+ movl %ebp, 52(%edi)
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ adcl 12(%esp), %ebp # 4-byte Folded Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl %edi, 40(%esp) # 4-byte Folded Spill
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 28(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 36(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ adcl %ebx, 64(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1108(%esp), %edx
+ movl 56(%edx), %eax
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 120(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 124(%esp), %ebp
+ movl %ebp, 32(%esp) # 4-byte Spill
+ adcl 128(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 164(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 156(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 152(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp
+ movl 144(%esp), %edi
+ movl 140(%esp), %esi
+ movl 136(%esp), %edx
+ movl 132(%esp), %ecx
+ movl 1104(%esp), %eax
+ movl 112(%esp), %ebx # 4-byte Reload
+ movl %ebx, 56(%eax)
+ movl 32(%esp), %ebx # 4-byte Reload
+ movl %ebx, 60(%eax)
+ adcl 40(%esp), %ecx # 4-byte Folded Reload
+ movl 72(%esp), %ebx # 4-byte Reload
+ movl %ebx, 64(%eax)
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 68(%eax)
+ adcl 36(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 72(%eax)
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 76(%eax)
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ movl %edi, 80(%eax)
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 64(%esp), %edx # 4-byte Folded Reload
+ movl %ebp, 84(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 88(%eax)
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 92(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 92(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 104(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 96(%eax)
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 108(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 100(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 104(%eax)
+ movl %ecx, 108(%eax)
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 112(%eax)
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 116(%eax)
+ addl $1084, %esp # imm = 0x43C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end225:
+ .size mcl_fpDbl_sqrPre15L, .Lfunc_end225-mcl_fpDbl_sqrPre15L
+
+ .globl mcl_fp_mont15L
+ .align 16, 0x90
+ .type mcl_fp_mont15L,@function
+mcl_fp_mont15L: # @mcl_fp_mont15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2044, %esp # imm = 0x7FC
+ calll .L226$pb
+.L226$pb:
+ popl %ebx
+.Ltmp47:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp47-.L226$pb), %ebx
+ movl 2076(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1976(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 1976(%esp), %ebp
+ movl 1980(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 2036(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 2032(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 2028(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 2024(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2020(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2016(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2012(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 2008(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 2004(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 2000(%esp), %edi
+ movl 1996(%esp), %esi
+ movl 1992(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 1988(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 1984(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1912(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ addl 1912(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1916(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1920(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1924(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1928(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1932(%esp), %esi
+ adcl 1936(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1940(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1944(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1948(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1952(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1956(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1960(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1964(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1968(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 1972(%esp), %ebp
+ sbbl %eax, %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1848(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 116(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ movl 88(%esp), %edx # 4-byte Reload
+ addl 1848(%esp), %edx
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1852(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1856(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1860(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1864(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 1868(%esp), %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1872(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1876(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1880(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1884(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1888(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1892(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1896(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1900(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 1904(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ adcl 1908(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1784(%esp), %ecx
+ movl 2076(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 1784(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1792(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1804(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1808(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 1812(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1816(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1820(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1824(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1828(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1832(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1836(%esp), %esi
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 1840(%esp), %ebp
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1844(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1720(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 1720(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1724(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1728(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1732(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1736(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1740(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1744(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1748(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1752(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 1756(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1760(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1764(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 1768(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ adcl 1772(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1776(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1780(%esp), %esi
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1656(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ addl 1656(%esp), %eax
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1660(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1664(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1668(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1672(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1676(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1680(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1684(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 1688(%esp), %ebp
+ adcl 1692(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1696(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1700(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1704(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1708(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 1712(%esp), %edi
+ adcl 1716(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1592(%esp), %ecx
+ movl 2068(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 1592(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1596(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1600(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1604(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1608(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1612(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1616(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1620(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1624(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 1628(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1632(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1636(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1640(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1644(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1648(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1652(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1528(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 1528(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1532(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1540(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 1544(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1548(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1552(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1564(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl 1568(%esp), %edi
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1572(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1576(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1580(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1584(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1588(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1464(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 92(%esp), %ecx # 4-byte Reload
+ addl 1464(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1472(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1476(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1480(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 1484(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1488(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1492(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1496(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 1500(%esp), %edi
+ movl %edi, 112(%esp) # 4-byte Spill
+ adcl 1504(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1508(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 1512(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1516(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1520(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1400(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl 92(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1400(%esp), %edi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1404(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1408(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1412(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1416(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 1420(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1424(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1428(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 1432(%esp), %edi
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1436(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1440(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1444(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 1448(%esp), %esi
+ movl %esi, %ebp
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1452(%esp), %esi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1456(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1460(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1336(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 1336(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1360(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1364(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1376(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1380(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ adcl 1384(%esp), %esi
+ movl %esi, %ebp
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1392(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1272(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl 80(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1272(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1276(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1280(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1284(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1288(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1296(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1300(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1304(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1308(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1312(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1316(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl 1320(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1324(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 1328(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1332(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %edi
+ movl 2072(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 1208(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1212(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1216(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 1232(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 1244(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1248(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1260(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1264(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1268(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1144(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ addl 1144(%esp), %eax
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1148(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 1156(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1168(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1180(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1188(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 1196(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1200(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1204(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1080(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 1080(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1088(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 1092(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 1128(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1016(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 1016(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1028(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 1032(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1044(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 1060(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 952(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 952(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 964(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 976(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 992(%esp), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl %ebp, %eax
+ andl $1, %eax
+ addl 888(%esp), %esi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 892(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 896(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 900(%esp), %edi
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 904(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 908(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 912(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 916(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 920(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 924(%esp), %ebp
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 928(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 932(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 936(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 940(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 944(%esp), %esi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 948(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 824(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 824(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 832(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 856(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 864(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 872(%esp), %edi
+ adcl 876(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 760(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 776(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 800(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 804(%esp), %ebp
+ adcl 808(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 816(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 696(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 708(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 736(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 748(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 752(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 632(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 656(%esp), %edi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 672(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 688(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 568(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 588(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 596(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 604(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 504(%esp), %esi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 516(%esp), %edi
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 520(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 532(%esp), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 560(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 100(%esp), %ecx # 4-byte Reload
+ addl 440(%esp), %ecx
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 448(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ adcl 452(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 460(%esp), %edi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 492(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 376(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 388(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 396(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 404(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 416(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl 112(%esp), %ecx # 4-byte Reload
+ addl 312(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 320(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 336(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 348(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 352(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 248(%esp), %edi
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 252(%esp), %esi
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl 256(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 288(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 2072(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 2068(%esp), %edx
+ calll .LmulPv480x32
+ movl %esi, %ecx
+ movl 96(%esp), %esi # 4-byte Reload
+ addl 184(%esp), %ecx
+ adcl 188(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 196(%esp), %ebp
+ adcl 200(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 2076(%esp), %edx
+ calll .LmulPv480x32
+ movl 104(%esp), %ebx # 4-byte Reload
+ andl $1, %ebx
+ addl 120(%esp), %edi
+ movl %ebp, %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 128(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl 132(%esp), %edi
+ adcl 136(%esp), %esi
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 140(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 148(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 152(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 156(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 160(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 164(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 168(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 172(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 176(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %edx # 4-byte Reload
+ adcl 180(%esp), %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ adcl $0, %ebx
+ movl %ebx, 104(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 2076(%esp), %ebp
+ subl (%ebp), %edx
+ sbbl 4(%ebp), %ecx
+ movl %edi, %eax
+ sbbl 8(%ebp), %eax
+ movl %esi, %ebx
+ sbbl 12(%ebp), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 84(%esp), %ebx # 4-byte Reload
+ sbbl 16(%ebp), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 92(%esp), %ebx # 4-byte Reload
+ sbbl 20(%ebp), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %ebx # 4-byte Reload
+ sbbl 24(%ebp), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 64(%esp), %ebx # 4-byte Reload
+ sbbl 28(%ebp), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 56(%esp), %ebx # 4-byte Reload
+ sbbl 32(%ebp), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 60(%esp), %ebx # 4-byte Reload
+ sbbl 36(%ebp), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 68(%esp), %ebx # 4-byte Reload
+ sbbl 40(%ebp), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 72(%esp), %ebx # 4-byte Reload
+ sbbl 44(%ebp), %ebx
+ movl %ebx, 48(%esp) # 4-byte Spill
+ movl 76(%esp), %ebx # 4-byte Reload
+ sbbl 48(%ebp), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 100(%esp), %ebx # 4-byte Reload
+ sbbl 52(%ebp), %ebx
+ movl %ebx, 88(%esp) # 4-byte Spill
+ movl 112(%esp), %ebx # 4-byte Reload
+ sbbl 56(%ebp), %ebx
+ movl %ebx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ebx # 4-byte Reload
+ movl 108(%esp), %ebp # 4-byte Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB226_2
+# BB#1:
+ movl %edx, %ebp
+.LBB226_2:
+ movl 2064(%esp), %edx
+ movl %ebp, (%edx)
+ testb %bl, %bl
+ movl 116(%esp), %ebp # 4-byte Reload
+ jne .LBB226_4
+# BB#3:
+ movl %ecx, %ebp
+.LBB226_4:
+ movl %ebp, 4(%edx)
+ jne .LBB226_6
+# BB#5:
+ movl %eax, %edi
+.LBB226_6:
+ movl %edi, 8(%edx)
+ jne .LBB226_8
+# BB#7:
+ movl 16(%esp), %esi # 4-byte Reload
+.LBB226_8:
+ movl %esi, 12(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ jne .LBB226_10
+# BB#9:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB226_10:
+ movl %eax, 16(%edx)
+ movl 92(%esp), %eax # 4-byte Reload
+ jne .LBB226_12
+# BB#11:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB226_12:
+ movl %eax, 20(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ jne .LBB226_14
+# BB#13:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB226_14:
+ movl %eax, 24(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ jne .LBB226_16
+# BB#15:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB226_16:
+ movl %eax, 28(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ jne .LBB226_18
+# BB#17:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB226_18:
+ movl %eax, 32(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ jne .LBB226_20
+# BB#19:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB226_20:
+ movl %eax, 36(%edx)
+ movl 68(%esp), %eax # 4-byte Reload
+ jne .LBB226_22
+# BB#21:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB226_22:
+ movl %eax, 40(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ jne .LBB226_24
+# BB#23:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB226_24:
+ movl %eax, 44(%edx)
+ movl 76(%esp), %eax # 4-byte Reload
+ jne .LBB226_26
+# BB#25:
+ movl 52(%esp), %eax # 4-byte Reload
+.LBB226_26:
+ movl %eax, 48(%edx)
+ movl 100(%esp), %eax # 4-byte Reload
+ jne .LBB226_28
+# BB#27:
+ movl 88(%esp), %eax # 4-byte Reload
+.LBB226_28:
+ movl %eax, 52(%edx)
+ movl 112(%esp), %eax # 4-byte Reload
+ jne .LBB226_30
+# BB#29:
+ movl 96(%esp), %eax # 4-byte Reload
+.LBB226_30:
+ movl %eax, 56(%edx)
+ addl $2044, %esp # imm = 0x7FC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end226:
+ .size mcl_fp_mont15L, .Lfunc_end226-mcl_fp_mont15L
+
+ .globl mcl_fp_montNF15L
+ .align 16, 0x90
+ .type mcl_fp_montNF15L,@function
+mcl_fp_montNF15L: # @mcl_fp_montNF15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2028, %esp # imm = 0x7EC
+ calll .L227$pb
+.L227$pb:
+ popl %ebx
+.Ltmp48:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp48-.L227$pb), %ebx
+ movl 2060(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1960(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1960(%esp), %ebp
+ movl 1964(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 2020(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2016(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2012(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2008(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 2004(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2000(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 1996(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 1992(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 1988(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 1984(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 1980(%esp), %esi
+ movl 1976(%esp), %edi
+ movl 1972(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 1968(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 1896(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1896(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1900(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1904(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1908(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1912(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 1916(%esp), %esi
+ movl %esi, %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1920(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1924(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1928(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1932(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1936(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1940(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1944(%esp), %ebp
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1948(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1952(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1956(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1832(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1892(%esp), %eax
+ movl 92(%esp), %edx # 4-byte Reload
+ addl 1832(%esp), %edx
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1836(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1840(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1844(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 1848(%esp), %edi
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1852(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1856(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1860(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1864(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1868(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1872(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 1876(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl 1880(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 1884(%esp), %ebp
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1888(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1768(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1768(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1772(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1776(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1780(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1784(%esp), %edi
+ movl %edi, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1792(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 1804(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1808(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1816(%esp), %eax
+ movl %eax, %esi
+ adcl 1820(%esp), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 1824(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1828(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1704(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1764(%esp), %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ addl 1704(%esp), %edx
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1708(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1712(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1716(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1720(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1724(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1728(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1732(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 1736(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1740(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 1744(%esp), %edi
+ adcl 1748(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1752(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 1756(%esp), %ebp
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1760(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1640(%esp), %ecx
+ movl 2060(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ addl 1640(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1644(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1648(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1652(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1656(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1660(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1664(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1668(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1672(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1676(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1680(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1684(%esp), %eax
+ movl %eax, %esi
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1688(%esp), %edi
+ adcl 1692(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1696(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 1700(%esp), %ebp
+ movl 2056(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1576(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1636(%esp), %eax
+ movl 88(%esp), %edx # 4-byte Reload
+ addl 1576(%esp), %edx
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1580(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1584(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1588(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1592(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1596(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1600(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1604(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1608(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1612(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 1616(%esp), %esi
+ adcl 1620(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1624(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1628(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1632(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1512(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1512(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1516(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1520(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1528(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 1532(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1540(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1544(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1548(%esp), %ebp
+ adcl 1552(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1564(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1568(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1572(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1448(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1508(%esp), %eax
+ movl 72(%esp), %edx # 4-byte Reload
+ addl 1448(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ adcl 1452(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1456(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1460(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 1464(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1468(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 1472(%esp), %edi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1476(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 1480(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ adcl 1484(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1488(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1492(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1496(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1500(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 1504(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1384(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1384(%esp), %esi
+ movl 40(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1400(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1404(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1408(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1412(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1432(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1436(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1440(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1444(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1320(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1380(%esp), %edx
+ movl 40(%esp), %ecx # 4-byte Reload
+ addl 1320(%esp), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 1324(%esp), %ebp
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 1328(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 1352(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1360(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 1368(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1376(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1256(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ movl 40(%esp), %eax # 4-byte Reload
+ addl 1256(%esp), %eax
+ adcl 1260(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ adcl 1264(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 1272(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 1296(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1304(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 1312(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1192(%esp), %ecx
+ movl 2052(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ movl 1252(%esp), %eax
+ movl 48(%esp), %edx # 4-byte Reload
+ addl 1192(%esp), %edx
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 1196(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1200(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 1204(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1208(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 1212(%esp), %ebp
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1216(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1220(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1224(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 1228(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1232(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1236(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1240(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 1244(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 1248(%esp), %esi
+ adcl $0, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1128(%esp), %ecx
+ movl 2060(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ addl 1128(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 1140(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1148(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1168(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1184(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 1188(%esp), %esi
+ movl 2056(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1064(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 1124(%esp), %eax
+ movl 44(%esp), %edx # 4-byte Reload
+ addl 1064(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1068(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 1072(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1076(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1080(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 1084(%esp), %edi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1088(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1092(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1096(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl 1100(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1104(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1108(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1112(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1116(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 1120(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %esi
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1000(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 1000(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 1012(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1020(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1028(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1044(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 1060(%esp), %esi
+ movl 2056(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 936(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 996(%esp), %eax
+ movl 52(%esp), %edx # 4-byte Reload
+ addl 936(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 940(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 944(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 948(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 952(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 956(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 960(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 964(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 968(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 972(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 976(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 980(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 984(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 988(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ adcl 992(%esp), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %esi
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 872(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 872(%esp), %edi
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 876(%esp), %ebp
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 880(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 932(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 808(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 868(%esp), %eax
+ movl %ebp, %ecx
+ addl 808(%esp), %ecx
+ adcl 812(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 816(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 820(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 824(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 828(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 832(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 836(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 840(%esp), %edi
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 844(%esp), %esi
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 848(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 852(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 856(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 860(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 864(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 744(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 744(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 768(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 776(%esp), %edi
+ adcl 780(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 792(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 680(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 740(%esp), %eax
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 680(%esp), %ecx
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 684(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 688(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 692(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 696(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ adcl 700(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 704(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 708(%esp), %edi
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 712(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 716(%esp), %ebp
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 720(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 724(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 728(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 732(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %edx # 4-byte Reload
+ adcl 736(%esp), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 616(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 616(%esp), %esi
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 620(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 644(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 648(%esp), %edi
+ adcl 652(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 656(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 612(%esp), %edx
+ movl %esi, %ecx
+ addl 552(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 572(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 580(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 588(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 488(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 488(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ adcl 508(%esp), %ebp
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 512(%esp), %edi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 528(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 484(%esp), %edx
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 424(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 440(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ adcl 444(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %esi, %edi
+ adcl 460(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 360(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 360(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 368(%esp), %esi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 376(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 396(%esp), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 400(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 296(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 356(%esp), %edx
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 296(%esp), %ecx
+ adcl 300(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 308(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 332(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 336(%esp), %edi
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 232(%esp), %ecx
+ movl 2060(%esp), %edx
+ calll .LmulPv480x32
+ addl 232(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 240(%esp), %ebp
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 244(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 272(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 276(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 2056(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 168(%esp), %ecx
+ movl 2052(%esp), %edx
+ calll .LmulPv480x32
+ movl 228(%esp), %edx
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 168(%esp), %ecx
+ adcl 172(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ adcl 176(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 180(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 184(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 188(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 192(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 208(%esp), %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 212(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %edi
+ movl %eax, (%esp)
+ leal 104(%esp), %ecx
+ movl 2060(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv480x32
+ addl 104(%esp), %edi
+ movl 68(%esp), %edi # 4-byte Reload
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 112(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl %ecx, %ebx
+ adcl 116(%esp), %edi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 120(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 124(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 128(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 132(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 136(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 140(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 44(%esp), %ecx # 4-byte Reload
+ adcl 144(%esp), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ adcl 148(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 152(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 156(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 160(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 164(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 2060(%esp), %ecx
+ subl (%ecx), %edx
+ movl %ebx, %ebp
+ sbbl 4(%ecx), %ebp
+ movl %edi, %ebx
+ sbbl 8(%ecx), %ebx
+ movl 88(%esp), %eax # 4-byte Reload
+ sbbl 12(%ecx), %eax
+ sbbl 16(%ecx), %esi
+ movl %esi, 4(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ sbbl 20(%ecx), %esi
+ movl %esi, 8(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ sbbl 24(%ecx), %esi
+ movl %esi, 12(%esp) # 4-byte Spill
+ movl 52(%esp), %esi # 4-byte Reload
+ sbbl 28(%ecx), %esi
+ movl %esi, 16(%esp) # 4-byte Spill
+ movl 56(%esp), %esi # 4-byte Reload
+ sbbl 32(%ecx), %esi
+ movl %esi, 20(%esp) # 4-byte Spill
+ movl 44(%esp), %esi # 4-byte Reload
+ sbbl 36(%ecx), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ sbbl 40(%ecx), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ sbbl 44(%ecx), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ sbbl 48(%ecx), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ sbbl 52(%ecx), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ sbbl 56(%ecx), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl %esi, %ecx
+ sarl $31, %ecx
+ testl %ecx, %ecx
+ movl 100(%esp), %ecx # 4-byte Reload
+ js .LBB227_2
+# BB#1:
+ movl %edx, %ecx
+.LBB227_2:
+ movl 2048(%esp), %edx
+ movl %ecx, (%edx)
+ movl 92(%esp), %esi # 4-byte Reload
+ js .LBB227_4
+# BB#3:
+ movl %ebp, %esi
+.LBB227_4:
+ movl %esi, 4(%edx)
+ movl 88(%esp), %ecx # 4-byte Reload
+ js .LBB227_6
+# BB#5:
+ movl %ebx, %edi
+.LBB227_6:
+ movl %edi, 8(%edx)
+ js .LBB227_8
+# BB#7:
+ movl %eax, %ecx
+.LBB227_8:
+ movl %ecx, 12(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB227_10
+# BB#9:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB227_10:
+ movl %eax, 16(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB227_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB227_12:
+ movl %eax, 20(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB227_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB227_14:
+ movl %eax, 24(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB227_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB227_16:
+ movl %eax, 28(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB227_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB227_18:
+ movl %eax, 32(%edx)
+ movl 44(%esp), %eax # 4-byte Reload
+ js .LBB227_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB227_20:
+ movl %eax, 36(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB227_22
+# BB#21:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB227_22:
+ movl %eax, 40(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ js .LBB227_24
+# BB#23:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB227_24:
+ movl %eax, 44(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ js .LBB227_26
+# BB#25:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB227_26:
+ movl %eax, 48(%edx)
+ movl 76(%esp), %eax # 4-byte Reload
+ js .LBB227_28
+# BB#27:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB227_28:
+ movl %eax, 52(%edx)
+ movl 96(%esp), %eax # 4-byte Reload
+ js .LBB227_30
+# BB#29:
+ movl 68(%esp), %eax # 4-byte Reload
+.LBB227_30:
+ movl %eax, 56(%edx)
+ addl $2028, %esp # imm = 0x7EC
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end227:
+ .size mcl_fp_montNF15L, .Lfunc_end227-mcl_fp_montNF15L
+
+ .globl mcl_fp_montRed15L
+ .align 16, 0x90
+ .type mcl_fp_montRed15L,@function
+mcl_fp_montRed15L: # @mcl_fp_montRed15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1148, %esp # imm = 0x47C
+ calll .L228$pb
+.L228$pb:
+ popl %eax
+.Ltmp49:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp49-.L228$pb), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1176(%esp), %edx
+ movl -4(%edx), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 1172(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 80(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ imull %esi, %ebx
+ movl 116(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%ecx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%ecx), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 104(%ecx), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 100(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 96(%ecx), %esi
+ movl %esi, 152(%esp) # 4-byte Spill
+ movl 92(%ecx), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 84(%ecx), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 80(%ecx), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 76(%ecx), %esi
+ movl %esi, 168(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl %esi, 164(%esp) # 4-byte Spill
+ movl 68(%ecx), %esi
+ movl %esi, 176(%esp) # 4-byte Spill
+ movl 64(%ecx), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 60(%ecx), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 56(%ecx), %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ movl 52(%ecx), %esi
+ movl %esi, 140(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 44(%ecx), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 36(%ecx), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 28(%ecx), %ebp
+ movl 24(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 20(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 16(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 12(%ecx), %edi
+ movl 8(%ecx), %esi
+ movl (%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 56(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 1080(%esp), %ecx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ movl 80(%esp), %eax # 4-byte Reload
+ addl 1080(%esp), %eax
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1084(%esp), %ecx
+ adcl 1088(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ adcl 1092(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1108(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ adcl $0, 180(%esp) # 4-byte Folded Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, 164(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ movl 148(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1016(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 1016(%esp), %esi
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1020(%esp), %edx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1044(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 1060(%esp), %ebp
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, 164(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, %esi
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 148(%esp) # 4-byte Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 952(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 952(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 956(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 992(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %ebp # 4-byte Reload
+ adcl 1004(%esp), %ebp
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ adcl $0, 164(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 888(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 888(%esp), %esi
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 892(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl 936(%esp), %ebp
+ movl %ebp, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 160(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ movl 132(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 824(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 824(%esp), %esi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 828(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 132(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 760(%esp), %esi
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 764(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ movl 152(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 696(%esp), %esi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 700(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 152(%esp) # 4-byte Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 632(%esp), %edi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 636(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %ebp # 4-byte Reload
+ adcl 672(%esp), %ebp
+ movl 164(%esp), %edi # 4-byte Reload
+ adcl 676(%esp), %edi
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl $0, 136(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 568(%esp), %esi
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 572(%esp), %ecx
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ adcl 604(%esp), %ebp
+ movl %ebp, 176(%esp) # 4-byte Spill
+ adcl 608(%esp), %edi
+ movl %edi, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %edi # 4-byte Reload
+ adcl 616(%esp), %edi
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 504(%esp), %ecx
+ movl 1176(%esp), %eax
+ movl %eax, %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 504(%esp), %esi
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 508(%esp), %ecx
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %esi # 4-byte Reload
+ adcl 524(%esp), %esi
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ adcl 548(%esp), %edi
+ movl %edi, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 440(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 440(%esp), %edi
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %edi # 4-byte Reload
+ adcl 452(%esp), %edi
+ adcl 456(%esp), %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %esi # 4-byte Reload
+ adcl 464(%esp), %esi
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 376(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 376(%esp), %ebp
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 380(%esp), %ebp
+ adcl 384(%esp), %edi
+ movl %edi, 140(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 172(%esp), %edi # 4-byte Reload
+ adcl 392(%esp), %edi
+ adcl 396(%esp), %esi
+ movl %esi, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %esi # 4-byte Reload
+ adcl 412(%esp), %esi
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, 84(%esp) # 4-byte Folded Spill
+ movl %ebp, %eax
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 312(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 312(%esp), %ebp
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 320(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ adcl 324(%esp), %edi
+ movl %edi, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %ecx # 4-byte Reload
+ adcl 328(%esp), %ecx
+ movl %ecx, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %ecx # 4-byte Reload
+ adcl 332(%esp), %ecx
+ movl %ecx, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %ecx # 4-byte Reload
+ adcl 336(%esp), %ecx
+ movl %ecx, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %ecx # 4-byte Reload
+ adcl 340(%esp), %ecx
+ movl %ecx, 168(%esp) # 4-byte Spill
+ adcl 344(%esp), %esi
+ movl %esi, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %ecx # 4-byte Reload
+ adcl 348(%esp), %ecx
+ movl %ecx, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp # 4-byte Reload
+ adcl 352(%esp), %ebp
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 356(%esp), %ecx
+ movl %ecx, 136(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 360(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 364(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 368(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 372(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl $0, 104(%esp) # 4-byte Folded Spill
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl %eax, %edi
+ imull 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 248(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 248(%esp), %edi
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 252(%esp), %ecx
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ adcl 284(%esp), %ebp
+ movl %ebp, 148(%esp) # 4-byte Spill
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl 288(%esp), %edi
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 308(%esp), %ebp
+ adcl $0, 108(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 1176(%esp), %edx
+ movl 92(%esp), %ebx # 4-byte Reload
+ calll .LmulPv480x32
+ addl 184(%esp), %esi
+ movl 172(%esp), %edx # 4-byte Reload
+ adcl 188(%esp), %edx
+ movl %edx, 172(%esp) # 4-byte Spill
+ movl 180(%esp), %ecx # 4-byte Reload
+ adcl 192(%esp), %ecx
+ movl %ecx, 180(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 168(%esp), %esi # 4-byte Reload
+ adcl 204(%esp), %esi
+ movl %esi, 168(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl 220(%esp), %edi
+ movl %edi, 136(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 240(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %ebx # 4-byte Reload
+ adcl $0, %ebx
+ movl %edx, %eax
+ subl 16(%esp), %edx # 4-byte Folded Reload
+ sbbl 4(%esp), %ecx # 4-byte Folded Reload
+ movl 176(%esp), %eax # 4-byte Reload
+ sbbl 8(%esp), %eax # 4-byte Folded Reload
+ movl 164(%esp), %ebp # 4-byte Reload
+ sbbl 12(%esp), %ebp # 4-byte Folded Reload
+ sbbl 20(%esp), %esi # 4-byte Folded Reload
+ movl 144(%esp), %edi # 4-byte Reload
+ sbbl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 160(%esp), %edi # 4-byte Reload
+ sbbl 28(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 148(%esp), %edi # 4-byte Reload
+ sbbl 32(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 136(%esp), %edi # 4-byte Reload
+ sbbl 36(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 152(%esp), %edi # 4-byte Reload
+ sbbl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 132(%esp), %edi # 4-byte Reload
+ sbbl 44(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 112(%esp) # 4-byte Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ sbbl 48(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ sbbl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 124(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ sbbl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 140(%esp) # 4-byte Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ sbbl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 156(%esp) # 4-byte Spill
+ sbbl $0, %ebx
+ andl $1, %ebx
+ movl %ebx, %edi
+ jne .LBB228_2
+# BB#1:
+ movl %edx, 172(%esp) # 4-byte Spill
+.LBB228_2:
+ movl 1168(%esp), %edx
+ movl 172(%esp), %ebx # 4-byte Reload
+ movl %ebx, (%edx)
+ movl %edi, %ebx
+ testb %bl, %bl
+ jne .LBB228_4
+# BB#3:
+ movl %ecx, 180(%esp) # 4-byte Spill
+.LBB228_4:
+ movl 180(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%edx)
+ movl 176(%esp), %ecx # 4-byte Reload
+ jne .LBB228_6
+# BB#5:
+ movl %eax, %ecx
+.LBB228_6:
+ movl %ecx, 8(%edx)
+ movl 164(%esp), %eax # 4-byte Reload
+ jne .LBB228_8
+# BB#7:
+ movl %ebp, %eax
+.LBB228_8:
+ movl %eax, 12(%edx)
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl 148(%esp), %eax # 4-byte Reload
+ movl 168(%esp), %ebp # 4-byte Reload
+ jne .LBB228_10
+# BB#9:
+ movl %esi, %ebp
+.LBB228_10:
+ movl %ebp, 16(%edx)
+ movl 152(%esp), %ebp # 4-byte Reload
+ movl 144(%esp), %ebx # 4-byte Reload
+ jne .LBB228_12
+# BB#11:
+ movl 84(%esp), %ebx # 4-byte Reload
+.LBB228_12:
+ movl %ebx, 20(%edx)
+ movl 132(%esp), %ebx # 4-byte Reload
+ movl 160(%esp), %edi # 4-byte Reload
+ jne .LBB228_14
+# BB#13:
+ movl 88(%esp), %edi # 4-byte Reload
+.LBB228_14:
+ movl %edi, 24(%edx)
+ movl 128(%esp), %edi # 4-byte Reload
+ jne .LBB228_16
+# BB#15:
+ movl 92(%esp), %eax # 4-byte Reload
+.LBB228_16:
+ movl %eax, 28(%edx)
+ movl 116(%esp), %esi # 4-byte Reload
+ jne .LBB228_18
+# BB#17:
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, 136(%esp) # 4-byte Spill
+.LBB228_18:
+ movl 136(%esp), %eax # 4-byte Reload
+ movl %eax, 32(%edx)
+ jne .LBB228_20
+# BB#19:
+ movl 100(%esp), %ebp # 4-byte Reload
+.LBB228_20:
+ movl %ebp, 36(%edx)
+ movl 104(%esp), %eax # 4-byte Reload
+ jne .LBB228_22
+# BB#21:
+ movl 112(%esp), %ebx # 4-byte Reload
+.LBB228_22:
+ movl %ebx, 40(%edx)
+ jne .LBB228_24
+# BB#23:
+ movl 120(%esp), %edi # 4-byte Reload
+.LBB228_24:
+ movl %edi, 44(%edx)
+ jne .LBB228_26
+# BB#25:
+ movl 124(%esp), %esi # 4-byte Reload
+.LBB228_26:
+ movl %esi, 48(%edx)
+ jne .LBB228_28
+# BB#27:
+ movl 140(%esp), %eax # 4-byte Reload
+.LBB228_28:
+ movl %eax, 52(%edx)
+ jne .LBB228_30
+# BB#29:
+ movl 156(%esp), %ecx # 4-byte Reload
+.LBB228_30:
+ movl %ecx, 56(%edx)
+ addl $1148, %esp # imm = 0x47C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end228:
+ .size mcl_fp_montRed15L, .Lfunc_end228-mcl_fp_montRed15L
+
+ .globl mcl_fp_addPre15L
+ .align 16, 0x90
+ .type mcl_fp_addPre15L,@function
+# uint32_t mcl_fp_addPre15L(uint32_t *z, const uint32_t *x, const uint32_t *y)
+# 15-limb (480-bit) multi-precision add WITHOUT modular reduction:
+#   z[0..14] = x[0..14] + y[0..14]; returns the final carry (0 or 1) in %eax.
+# i386 cdecl: after the three register pushes the stack args sit at
+#   16(%esp) = z (destination), 20(%esp) / 24(%esp) = the two source operands.
+# The adcl chain below must not be reordered: each instruction consumes the
+# carry flag produced by the previous one.
+mcl_fp_addPre15L: # @mcl_fp_addPre15L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx # limb 0: plain add starts the carry chain
+ adcl 4(%ecx), %esi # limbs 1..14: adcl propagates the carry
+ movl 8(%eax), %ebx
+ adcl 8(%ecx), %ebx
+ movl 16(%esp), %edi
+ movl %edx, (%edi)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%edi)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %ebx, 8(%edi)
+ movl 20(%eax), %ebx
+ movl %edx, 12(%edi)
+ movl 20(%ecx), %edx
+ adcl %ebx, %edx
+ movl 24(%eax), %ebx
+ movl %esi, 16(%edi)
+ movl 24(%ecx), %esi
+ adcl %ebx, %esi
+ movl 28(%eax), %ebx
+ movl %edx, 20(%edi)
+ movl 28(%ecx), %edx
+ adcl %ebx, %edx
+ movl 32(%eax), %ebx
+ movl %esi, 24(%edi)
+ movl 32(%ecx), %esi
+ adcl %ebx, %esi
+ movl 36(%eax), %ebx
+ movl %edx, 28(%edi)
+ movl 36(%ecx), %edx
+ adcl %ebx, %edx
+ movl 40(%eax), %ebx
+ movl %esi, 32(%edi)
+ movl 40(%ecx), %esi
+ adcl %ebx, %esi
+ movl 44(%eax), %ebx
+ movl %edx, 36(%edi)
+ movl 44(%ecx), %edx
+ adcl %ebx, %edx
+ movl 48(%eax), %ebx
+ movl %esi, 40(%edi)
+ movl 48(%ecx), %esi
+ adcl %ebx, %esi
+ movl 52(%eax), %ebx
+ movl %edx, 44(%edi)
+ movl 52(%ecx), %edx
+ adcl %ebx, %edx
+ movl %esi, 48(%edi)
+ movl %edx, 52(%edi)
+ movl 56(%eax), %eax
+ movl 56(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 56(%edi)
+ sbbl %eax, %eax # materialize final carry: %eax = CF ? -1 : 0
+ andl $1, %eax # ... masked down to the 0/1 return value
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end229:
+ .size mcl_fp_addPre15L, .Lfunc_end229-mcl_fp_addPre15L
+
+ .globl mcl_fp_subPre15L
+ .align 16, 0x90
+ .type mcl_fp_subPre15L,@function
+# uint32_t mcl_fp_subPre15L(uint32_t *z, const uint32_t *x, const uint32_t *y)
+# 15-limb (480-bit) multi-precision subtract WITHOUT modular correction:
+#   z[0..14] = x[0..14] - y[0..14]; returns the final borrow (0 or 1) in %eax.
+# i386 cdecl: after the four register pushes the stack args sit at
+#   20(%esp) = z (destination), 24(%esp) = minuend, 28(%esp) = subtrahend.
+# The sbbl chain must not be reordered: each instruction consumes the
+# carry/borrow flag produced by the previous one.
+mcl_fp_subPre15L: # @mcl_fp_subPre15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax # %eax = 0; final borrow folded in at the end
+ movl 28(%esp), %edx
+ subl (%edx), %esi # limb 0: plain sub starts the borrow chain
+ sbbl 4(%edx), %edi # limbs 1..14: sbbl propagates the borrow
+ movl 8(%ecx), %ebp
+ sbbl 8(%edx), %ebp
+ movl 20(%esp), %ebx
+ movl %esi, (%ebx)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebx)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebp, 8(%ebx)
+ movl 20(%edx), %ebp
+ movl %esi, 12(%ebx)
+ movl 20(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 24(%edx), %ebp
+ movl %edi, 16(%ebx)
+ movl 24(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 28(%edx), %ebp
+ movl %esi, 20(%ebx)
+ movl 28(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 32(%edx), %ebp
+ movl %edi, 24(%ebx)
+ movl 32(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 36(%edx), %ebp
+ movl %esi, 28(%ebx)
+ movl 36(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 40(%edx), %ebp
+ movl %edi, 32(%ebx)
+ movl 40(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 44(%edx), %ebp
+ movl %esi, 36(%ebx)
+ movl 44(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 48(%edx), %ebp
+ movl %edi, 40(%ebx)
+ movl 48(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 52(%edx), %ebp
+ movl %esi, 44(%ebx)
+ movl 52(%ecx), %esi
+ sbbl %ebp, %esi
+ movl %edi, 48(%ebx)
+ movl %esi, 52(%ebx)
+ movl 56(%edx), %edx
+ movl 56(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 56(%ebx)
+ sbbl $0, %eax # %eax = 0 - borrow = 0 or -1
+ andl $1, %eax # ... masked down to the 0/1 return value
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end230:
+ .size mcl_fp_subPre15L, .Lfunc_end230-mcl_fp_subPre15L
+
+ .globl mcl_fp_shr1_15L
+ .align 16, 0x90
+ .type mcl_fp_shr1_15L,@function
+# void mcl_fp_shr1_15L(uint32_t *z, const uint32_t *x)
+# Logical right shift of a 15-limb (480-bit) value by one bit: z = x >> 1.
+# i386 cdecl: after the push, 8(%esp) = z (destination), 12(%esp) = x (source).
+# Each shrdl $1, hi, lo shifts limb `lo` right by 1 and feeds bit 0 of the
+# next-higher limb `hi` into its top bit; the final top limb gets a plain
+# shrl (a zero is shifted in).
+mcl_fp_shr1_15L: # @mcl_fp_shr1_15L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx # z[0] = (x[1]:x[0]) >> 1 (low half)
+ movl 8(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 40(%ecx)
+ movl 48(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 44(%ecx)
+ movl 52(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 48(%ecx)
+ movl 56(%eax), %eax
+ shrdl $1, %eax, %esi
+ movl %esi, 52(%ecx)
+ shrl %eax # top limb: shift in a zero
+ movl %eax, 56(%ecx)
+ popl %esi
+ retl
+.Lfunc_end231:
+ .size mcl_fp_shr1_15L, .Lfunc_end231-mcl_fp_shr1_15L
+
+ .globl mcl_fp_add15L
+ .align 16, 0x90
+ .type mcl_fp_add15L,@function
+# void mcl_fp_add15L(uint32_t *z, const uint32_t *x, const uint32_t *y,
+#                    const uint32_t *p)
+# 15-limb modular add: z = (x + y) mod p, assuming x, y < p.
+# i386 cdecl: after 4 pushes + subl $48 the stack args sit at
+#   68(%esp)=z, 72(%esp)=x, 76(%esp)=y, 80(%esp)=p (modulus).
+# Strategy: compute s = x + y and store it to z unconditionally, then
+# compute s - p; if the overall operation borrowed (add-carry and subtract
+# chain together, tracked in %ebx) s < p and z already holds the result,
+# otherwise overwrite z with s - p.
+mcl_fp_add15L: # @mcl_fp_add15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $48, %esp
+ movl 76(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edx
+ movl 72(%esp), %eax
+ addl (%eax), %esi # s = x + y, limb by limb (carry chain)
+ movl %esi, 4(%esp) # 4-byte Spill
+ adcl 4(%eax), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 8(%ecx), %edx
+ adcl 8(%eax), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 12(%eax), %esi
+ movl 16(%eax), %edx
+ adcl 12(%ecx), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ adcl 16(%ecx), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 20(%eax), %edx
+ adcl 20(%ecx), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 24(%eax), %edx
+ adcl 24(%ecx), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 28(%eax), %edx
+ adcl 28(%ecx), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 32(%eax), %edx
+ adcl 32(%ecx), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 36(%eax), %edx
+ adcl 36(%ecx), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 40(%eax), %edx
+ adcl 40(%ecx), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 44(%eax), %ebx
+ adcl 44(%ecx), %ebx
+ movl %ebx, (%esp) # 4-byte Spill
+ movl 48(%eax), %ebp
+ adcl 48(%ecx), %ebp
+ movl 52(%eax), %edi
+ adcl 52(%ecx), %edi
+ movl 56(%eax), %edx
+ adcl 56(%ecx), %edx
+ movl 68(%esp), %ecx
+ movl 4(%esp), %eax # 4-byte Reload
+ movl %eax, (%ecx) # store s to z unconditionally
+ movl 44(%esp), %esi # 4-byte Reload
+ movl %esi, 4(%ecx)
+ movl 40(%esp), %esi # 4-byte Reload
+ movl %esi, 8(%ecx)
+ movl 36(%esp), %esi # 4-byte Reload
+ movl %esi, 12(%ecx)
+ movl 32(%esp), %esi # 4-byte Reload
+ movl %esi, 16(%ecx)
+ movl 28(%esp), %esi # 4-byte Reload
+ movl %esi, 20(%ecx)
+ movl 24(%esp), %esi # 4-byte Reload
+ movl %esi, 24(%ecx)
+ movl 20(%esp), %esi # 4-byte Reload
+ movl %esi, 28(%ecx)
+ movl 16(%esp), %esi # 4-byte Reload
+ movl %esi, 32(%ecx)
+ movl 12(%esp), %esi # 4-byte Reload
+ movl %esi, 36(%ecx)
+ movl 8(%esp), %esi # 4-byte Reload
+ movl %esi, 40(%ecx)
+ movl %ebx, 44(%ecx)
+ movl %ebp, 48(%ecx)
+ movl %edi, 52(%ecx)
+ movl %edx, 56(%ecx)
+ sbbl %ebx, %ebx # %ebx = -(carry out of the add chain)
+ andl $1, %ebx
+ movl 80(%esp), %esi
+ subl (%esi), %eax # compute s - p (borrow chain)
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ sbbl 4(%esi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl 40(%esp), %edx # 4-byte Reload
+ sbbl 8(%esi), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %edx # 4-byte Reload
+ sbbl 12(%esi), %edx
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %edx # 4-byte Reload
+ sbbl 16(%esi), %edx
+ movl %edx, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %edx # 4-byte Reload
+ sbbl 20(%esi), %edx
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %edx # 4-byte Reload
+ sbbl 24(%esi), %edx
+ movl %edx, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %edx # 4-byte Reload
+ sbbl 28(%esi), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %edx # 4-byte Reload
+ sbbl 32(%esi), %edx
+ movl %edx, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %edx # 4-byte Reload
+ sbbl 36(%esi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 8(%esp), %edx # 4-byte Reload
+ sbbl 40(%esi), %edx
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl (%esp), %edx # 4-byte Reload
+ sbbl 44(%esi), %edx
+ movl %edx, (%esp) # 4-byte Spill
+ sbbl 48(%esi), %ebp
+ sbbl 52(%esi), %edi
+ sbbl 56(%esi), %eax
+ sbbl $0, %ebx # fold add-carry into the subtract borrow
+ testb $1, %bl
+ jne .LBB232_2 # borrow set => s < p, z already correct
+# BB#1: # %nocarry
+ movl 4(%esp), %edx # 4-byte Reload
+ movl %edx, (%ecx) # overwrite z with the reduced value s - p
+ movl 44(%esp), %edx # 4-byte Reload
+ movl %edx, 4(%ecx)
+ movl 40(%esp), %edx # 4-byte Reload
+ movl %edx, 8(%ecx)
+ movl 36(%esp), %edx # 4-byte Reload
+ movl %edx, 12(%ecx)
+ movl 32(%esp), %edx # 4-byte Reload
+ movl %edx, 16(%ecx)
+ movl 28(%esp), %edx # 4-byte Reload
+ movl %edx, 20(%ecx)
+ movl 24(%esp), %edx # 4-byte Reload
+ movl %edx, 24(%ecx)
+ movl 20(%esp), %edx # 4-byte Reload
+ movl %edx, 28(%ecx)
+ movl 16(%esp), %edx # 4-byte Reload
+ movl %edx, 32(%ecx)
+ movl 12(%esp), %edx # 4-byte Reload
+ movl %edx, 36(%ecx)
+ movl 8(%esp), %edx # 4-byte Reload
+ movl %edx, 40(%ecx)
+ movl (%esp), %edx # 4-byte Reload
+ movl %edx, 44(%ecx)
+ movl %ebp, 48(%ecx)
+ movl %edi, 52(%ecx)
+ movl %eax, 56(%ecx)
+.LBB232_2: # %carry
+ addl $48, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end232:
+ .size mcl_fp_add15L, .Lfunc_end232-mcl_fp_add15L
+
+ .globl mcl_fp_addNF15L
+ .align 16, 0x90
+ .type mcl_fp_addNF15L,@function
+# void mcl_fp_addNF15L(uint32_t *z, const uint32_t *x, const uint32_t *y,
+#                      const uint32_t *p)
+# 15-limb modular add, "NF" variant (presumably "no final carry word" —
+# the inputs are assumed < p so x + y never overflows 480 bits; confirm
+# against mcl's generator).  z = (x + y) mod p.
+# i386 cdecl: after 4 pushes + subl $120 the stack args sit at
+#   140(%esp)=z, 144(%esp)=x, 148(%esp)=y, 152(%esp)=p.
+# Strategy: s = x + y; d = s - p; select per-limb between s and d based on
+# the sign of d's top limb (sarl $31 + js): negative d => s < p, keep s.
+mcl_fp_addNF15L: # @mcl_fp_addNF15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $120, %esp
+ movl 148(%esp), %ecx
+ movl (%ecx), %eax
+ movl 4(%ecx), %edx
+ movl 144(%esp), %esi
+ addl (%esi), %eax # s = x + y (carry chain through all 15 limbs)
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 4(%esi), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 52(%ecx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 44(%ecx), %ebp
+ movl 40(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 28(%ecx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 24(%ecx), %eax
+ movl 20(%ecx), %ebx
+ movl 16(%ecx), %edi
+ movl 12(%ecx), %edx
+ movl 8(%ecx), %ecx
+ adcl 8(%esi), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 12(%esi), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 16(%esi), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ adcl 20(%esi), %ebx
+ movl %ebx, 72(%esp) # 4-byte Spill
+ adcl 24(%esi), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 28(%esi), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 32(%esi), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esi), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 40(%esi), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 44(%esi), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 48(%esi), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 52(%esi), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 56(%esi), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 152(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ subl (%esi), %eax # d = s - p (borrow chain), spilled at 0..56(%esp)
+ movl %eax, (%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ sbbl 4(%esi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 8(%esi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 12(%esi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ sbbl 16(%esi), %edi
+ movl %edi, 16(%esp) # 4-byte Spill
+ sbbl 20(%esi), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 24(%esi), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ sbbl 28(%esi), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ sbbl 32(%esi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 36(%esi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ sbbl 40(%esi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ movl %edx, %eax
+ sbbl 44(%esi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, %edi
+ sbbl 48(%esi), %edi
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edi
+ movl %ecx, %ebx
+ sbbl 52(%esi), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ movl %ecx, %edi
+ sbbl 56(%esi), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl %edi, %esi
+ sarl $31, %esi # sign of d's top limb: -1 if s < p, else 0
+ testl %esi, %esi
+ movl 80(%esp), %esi # 4-byte Reload
+ js .LBB233_2 # negative => keep s; otherwise take d = s - p
+# BB#1:
+ movl (%esp), %esi # 4-byte Reload
+.LBB233_2:
+ movl 140(%esp), %edi
+ movl %esi, (%edi)
+ movl 84(%esp), %ecx # 4-byte Reload
+ js .LBB233_4
+# BB#3:
+ movl 4(%esp), %ecx # 4-byte Reload
+.LBB233_4:
+ movl %ecx, 4(%edi)
+ movl 104(%esp), %ecx # 4-byte Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ js .LBB233_6
+# BB#5:
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+.LBB233_6:
+ movl 76(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%edi)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB233_8
+# BB#7:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB233_8:
+ movl %eax, 12(%edi)
+ movl %ebx, %ebp
+ movl %edx, %eax
+ movl 68(%esp), %edx # 4-byte Reload
+ js .LBB233_10
+# BB#9:
+ movl 16(%esp), %edx # 4-byte Reload
+.LBB233_10:
+ movl %edx, 16(%edi)
+ movl 112(%esp), %edx # 4-byte Reload
+ movl 108(%esp), %ebx # 4-byte Reload
+ js .LBB233_12
+# BB#11:
+ movl 20(%esp), %esi # 4-byte Reload
+.LBB233_12:
+ movl %esi, 20(%edi)
+ js .LBB233_14
+# BB#13:
+ movl 24(%esp), %esi # 4-byte Reload
+ movl %esi, 88(%esp) # 4-byte Spill
+.LBB233_14:
+ movl 88(%esp), %esi # 4-byte Reload
+ movl %esi, 24(%edi)
+ js .LBB233_16
+# BB#15:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB233_16:
+ movl %ecx, 28(%edi)
+ js .LBB233_18
+# BB#17:
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 116(%esp) # 4-byte Spill
+.LBB233_18:
+ movl 116(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%edi)
+ js .LBB233_20
+# BB#19:
+ movl 36(%esp), %ebx # 4-byte Reload
+.LBB233_20:
+ movl %ebx, 36(%edi)
+ js .LBB233_22
+# BB#21:
+ movl 40(%esp), %edx # 4-byte Reload
+.LBB233_22:
+ movl %edx, 40(%edi)
+ js .LBB233_24
+# BB#23:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB233_24:
+ movl %eax, 44(%edi)
+ movl 96(%esp), %eax # 4-byte Reload
+ js .LBB233_26
+# BB#25:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB233_26:
+ movl %eax, 48(%edi)
+ js .LBB233_28
+# BB#27:
+ movl 52(%esp), %ebp # 4-byte Reload
+.LBB233_28:
+ movl %ebp, 52(%edi)
+ movl 100(%esp), %eax # 4-byte Reload
+ js .LBB233_30
+# BB#29:
+ movl 56(%esp), %eax # 4-byte Reload
+.LBB233_30:
+ movl %eax, 56(%edi)
+ addl $120, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end233:
+ .size mcl_fp_addNF15L, .Lfunc_end233-mcl_fp_addNF15L
+
+ .globl mcl_fp_sub15L
+ .align 16, 0x90
+ .type mcl_fp_sub15L,@function
+# void mcl_fp_sub15L(uint32_t *z, const uint32_t *x, const uint32_t *y,
+#                    const uint32_t *p)
+# 15-limb modular subtract: z = (x - y) mod p, assuming x, y < p.
+# i386 cdecl: after 4 pushes + subl $56 the stack args sit at
+#   76(%esp)=z, 80(%esp)=x, 84(%esp)=y, 88(%esp)=p.
+# Strategy: d = x - y is stored to z unconditionally; if the subtraction
+# borrowed (tracked in %ebx) the result went negative and p is added back.
+mcl_fp_sub15L: # @mcl_fp_sub15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 80(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx # %ebx will capture the final borrow
+ movl 84(%esp), %edi
+ subl (%edi), %eax # d = x - y (borrow chain)
+ movl %eax, 48(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 32(%esi), %eax
+ sbbl 32(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 36(%esi), %eax
+ sbbl 36(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 40(%esi), %edx
+ sbbl 40(%edi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 44(%esi), %ecx
+ sbbl 44(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 48(%esi), %eax
+ sbbl 48(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 52(%esi), %ebp
+ sbbl 52(%edi), %ebp
+ movl 56(%esi), %esi
+ sbbl 56(%edi), %esi
+ sbbl $0, %ebx # %ebx = -borrow
+ testb $1, %bl
+ movl 76(%esp), %ebx
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx) # store d to z unconditionally
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 52(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%ebx)
+ movl %edx, 40(%ebx)
+ movl %ecx, 44(%ebx)
+ movl %eax, 48(%ebx)
+ movl %ebp, 52(%ebx)
+ movl %esi, 56(%ebx)
+ je .LBB234_2 # no borrow => d is already in range
+# BB#1: # %carry
+ movl %esi, (%esp) # 4-byte Spill
+ movl 88(%esp), %esi
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx # z = d + p (add the modulus back)
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ adcl 8(%esi), %edi
+ movl 12(%esi), %eax
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 32(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 24(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl 20(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl 40(%esi), %ecx
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ movl 44(%esi), %eax
+ adcl 8(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%ebx)
+ movl 48(%esi), %ecx
+ adcl 4(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 44(%ebx)
+ movl %ecx, 48(%ebx)
+ movl 52(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 52(%ebx)
+ movl 56(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%ebx)
+.LBB234_2: # %nocarry
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end234:
+ .size mcl_fp_sub15L, .Lfunc_end234-mcl_fp_sub15L
+
+ .globl mcl_fp_subNF15L
+ .align 16, 0x90
+ .type mcl_fp_subNF15L,@function
+# void mcl_fp_subNF15L(uint32_t *z, const uint32_t *x, const uint32_t *y,
+#                      const uint32_t *p)
+# 15-limb modular subtract, branch-free correction variant:
+#   z = (x - y) mod p.
+# i386 cdecl: after 4 pushes + subl $96 the stack args sit at
+#   116(%esp)=z, 120(%esp)=x, 124(%esp)=y, 128(%esp)=p.
+# Strategy: d = x - y; build mask = sign-extension of d's top limb
+# (all-ones iff the subtraction went negative), AND every limb of p with
+# the mask, and add the masked modulus back: z = d + (p & mask).
+mcl_fp_subNF15L: # @mcl_fp_subNF15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $96, %esp
+ movl 120(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edx
+ movl 124(%esp), %edi
+ subl (%edi), %esi # d = x - y (borrow chain)
+ movl %esi, 60(%esp) # 4-byte Spill
+ sbbl 4(%edi), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 52(%ecx), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 48(%ecx), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 32(%ecx), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 28(%ecx), %ebp
+ movl 24(%ecx), %ebx
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 40(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 32(%edi), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 48(%edi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 52(%edi), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ sbbl 56(%edi), %eax # top limb of d; its sign flags underflow
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ sarl $31, %ebp # mask = d < 0 ? 0xffffffff : 0
+ movl %ebp, %edi
+ shldl $1, %eax, %edi # %edi = mask as well (sign bit shifted back in)
+ movl 128(%esp), %edx
+ andl (%edx), %edi # p[0] & mask
+ movl 56(%edx), %eax
+ andl %ebp, %eax # p[14] & mask, and so on downwards
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 52(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 48(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 44(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 40(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 36(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ andl %ebp, %eax
+ movl %eax, (%esp) # 4-byte Spill
+ movl 20(%edx), %ebx
+ andl %ebp, %ebx
+ movl 16(%edx), %esi
+ andl %ebp, %esi
+ movl 12(%edx), %ecx
+ andl %ebp, %ecx
+ movl 8(%edx), %eax
+ andl %ebp, %eax
+ andl 4(%edx), %ebp
+ addl 60(%esp), %edi # 4-byte Folded Reload
+ adcl 64(%esp), %ebp # 4-byte Folded Reload # z = d + (p & mask)
+ movl 116(%esp), %edx
+ movl %edi, (%edx)
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 4(%edx)
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 8(%edx)
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %ecx, 12(%edx)
+ adcl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %esi, 16(%edx)
+ movl (%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ebx, 20(%edx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%edx)
+ movl 8(%esp), %ecx # 4-byte Reload
+ adcl 92(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%edx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%edx)
+ movl 16(%esp), %ecx # 4-byte Reload
+ adcl 76(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%edx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%edx)
+ movl 24(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 44(%edx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 48(%edx)
+ movl %eax, 52(%edx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%edx)
+ addl $96, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end235:
+ .size mcl_fp_subNF15L, .Lfunc_end235-mcl_fp_subNF15L
+
+ .globl mcl_fpDbl_add15L
+ .align 16, 0x90
+ .type mcl_fpDbl_add15L,@function
+mcl_fpDbl_add15L: # @mcl_fpDbl_add15L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $108, %esp
+ movl 136(%esp), %ecx
+ movl 132(%esp), %edx
+ movl 12(%edx), %edi
+ movl 16(%edx), %esi
+ movl 8(%ecx), %ebx
+ movl (%ecx), %ebp
+ addl (%edx), %ebp
+ movl 128(%esp), %eax
+ movl %ebp, (%eax)
+ movl 4(%ecx), %ebp
+ adcl 4(%edx), %ebp
+ adcl 8(%edx), %ebx
+ adcl 12(%ecx), %edi
+ adcl 16(%ecx), %esi
+ movl %ebp, 4(%eax)
+ movl 68(%ecx), %ebp
+ movl %ebx, 8(%eax)
+ movl 20(%ecx), %ebx
+ movl %edi, 12(%eax)
+ movl 20(%edx), %edi
+ adcl %ebx, %edi
+ movl 24(%ecx), %ebx
+ movl %esi, 16(%eax)
+ movl 24(%edx), %esi
+ adcl %ebx, %esi
+ movl 28(%ecx), %ebx
+ movl %edi, 20(%eax)
+ movl 28(%edx), %edi
+ adcl %ebx, %edi
+ movl 32(%ecx), %ebx
+ movl %esi, 24(%eax)
+ movl 32(%edx), %esi
+ adcl %ebx, %esi
+ movl 36(%ecx), %ebx
+ movl %edi, 28(%eax)
+ movl 36(%edx), %edi
+ adcl %ebx, %edi
+ movl 40(%ecx), %ebx
+ movl %esi, 32(%eax)
+ movl 40(%edx), %esi
+ adcl %ebx, %esi
+ movl 44(%ecx), %ebx
+ movl %edi, 36(%eax)
+ movl 44(%edx), %edi
+ adcl %ebx, %edi
+ movl 48(%ecx), %ebx
+ movl %esi, 40(%eax)
+ movl 48(%edx), %esi
+ adcl %ebx, %esi
+ movl 52(%ecx), %ebx
+ movl %edi, 44(%eax)
+ movl 52(%edx), %edi
+ adcl %ebx, %edi
+ movl 56(%ecx), %ebx
+ movl %esi, 48(%eax)
+ movl 56(%edx), %esi
+ adcl %ebx, %esi
+ movl 60(%ecx), %ebx
+ movl %edi, 52(%eax)
+ movl 60(%edx), %edi
+ adcl %ebx, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 64(%ecx), %edi
+ movl %esi, 56(%eax)
+ movl 64(%edx), %eax
+ adcl %edi, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%edx), %eax
+ adcl %ebp, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl 72(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 76(%ecx), %esi
+ movl 76(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 80(%ecx), %esi
+ movl 80(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 84(%ecx), %esi
+ movl 84(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%ecx), %esi
+ movl 88(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 92(%ecx), %esi
+ movl 92(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 96(%ecx), %esi
+ movl 96(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 100(%ecx), %esi
+ movl 100(%edx), %eax
+ adcl %esi, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%ecx), %eax
+ movl 104(%edx), %esi
+ adcl %eax, %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 108(%ecx), %edi
+ movl 108(%edx), %eax
+ adcl %edi, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 112(%ecx), %ebx
+ movl 112(%edx), %edi
+ adcl %ebx, %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 116(%ecx), %ecx
+ movl 116(%edx), %edx
+ adcl %ecx, %edx
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+ movl 140(%esp), %ebp
+ movl 76(%esp), %ecx # 4-byte Reload
+ subl (%ebp), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ sbbl 4(%ebp), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 8(%ebp), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 12(%ebp), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 16(%ebp), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ sbbl 20(%ebp), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ sbbl 24(%ebp), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ sbbl 28(%ebp), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ sbbl 32(%ebp), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ sbbl 36(%ebp), %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ sbbl 40(%ebp), %ecx
+ sbbl 44(%ebp), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ sbbl 48(%ebp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl %edi, %eax
+ movl %edx, %edi
+ sbbl 52(%ebp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %edi, %esi
+ sbbl 56(%ebp), %esi
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB236_2
+# BB#1:
+ movl %esi, %edi
+.LBB236_2:
+ testb %bl, %bl
+ movl 76(%esp), %eax # 4-byte Reload
+ movl 72(%esp), %esi # 4-byte Reload
+ movl 68(%esp), %ebx # 4-byte Reload
+ movl 64(%esp), %ebp # 4-byte Reload
+ jne .LBB236_4
+# BB#3:
+ movl %ecx, %esi
+ movl (%esp), %ebx # 4-byte Reload
+ movl 4(%esp), %ebp # 4-byte Reload
+ movl 8(%esp), %eax # 4-byte Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB236_4:
+ movl 128(%esp), %edx
+ movl %eax, 60(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ movl %eax, 64(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ movl %eax, 68(%edx)
+ movl 88(%esp), %eax # 4-byte Reload
+ movl %eax, 72(%edx)
+ movl 92(%esp), %eax # 4-byte Reload
+ movl %eax, 76(%edx)
+ movl 96(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%edx)
+ movl 100(%esp), %eax # 4-byte Reload
+ movl %eax, 84(%edx)
+ movl 104(%esp), %eax # 4-byte Reload
+ movl %eax, 88(%edx)
+ movl %ebp, 92(%edx)
+ movl %ebx, 96(%edx)
+ movl %esi, 100(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ jne .LBB236_6
+# BB#5:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB236_6:
+ movl %eax, 104(%edx)
+ movl 60(%esp), %ecx # 4-byte Reload
+ movl 56(%esp), %eax # 4-byte Reload
+ jne .LBB236_8
+# BB#7:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB236_8:
+ movl %eax, 108(%edx)
+ jne .LBB236_10
+# BB#9:
+ movl 48(%esp), %ecx # 4-byte Reload
+.LBB236_10:
+ movl %ecx, 112(%edx)
+ movl %edi, 116(%edx)
+ addl $108, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end236:
+ .size mcl_fpDbl_add15L, .Lfunc_end236-mcl_fpDbl_add15L
+
+ .globl mcl_fpDbl_sub15L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub15L,@function
+mcl_fpDbl_sub15L: # @mcl_fpDbl_sub15L
+# void mcl_fpDbl_sub15L(uint32_t z[30], const uint32_t x[30],
+#                       const uint32_t y[30], const uint32_t p[15])
+# i386 cdecl.  After the prologue (4 pushes + subl $100) the arguments are:
+#   120(%esp)=z (result), 124(%esp)=x, 128(%esp)=y, 132(%esp)=p
+# Computes the 30-limb (960-bit) difference x - y.  The low 15 limbs are
+# stored to z directly as they are produced; the high 15 limbs are kept in
+# spill slots and, if the full subtraction borrowed, the 15-limb value p
+# (presumably the field modulus -- generated code, contract not visible here)
+# is added back into the high half before it is stored to z[15..29].
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $100, %esp
+# low 15 limbs: x[i] - y[i] with a single sbb chain, stored straight to z
+ movl 124(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 128(%esp), %ebp
+ subl (%ebp), %edx
+ sbbl 4(%ebp), %esi
+ movl 8(%eax), %edi
+ sbbl 8(%ebp), %edi
+ movl 120(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 12(%eax), %edx
+ sbbl 12(%ebp), %edx
+ movl %esi, 4(%ecx)
+ movl 16(%eax), %esi
+ sbbl 16(%ebp), %esi
+ movl %edi, 8(%ecx)
+ movl 20(%ebp), %edi
+ movl %edx, 12(%ecx)
+ movl 20(%eax), %edx
+ sbbl %edi, %edx
+ movl 24(%ebp), %edi
+ movl %esi, 16(%ecx)
+ movl 24(%eax), %esi
+ sbbl %edi, %esi
+ movl 28(%ebp), %edi
+ movl %edx, 20(%ecx)
+ movl 28(%eax), %edx
+ sbbl %edi, %edx
+ movl 32(%ebp), %edi
+ movl %esi, 24(%ecx)
+ movl 32(%eax), %esi
+ sbbl %edi, %esi
+ movl 36(%ebp), %edi
+ movl %edx, 28(%ecx)
+ movl 36(%eax), %edx
+ sbbl %edi, %edx
+ movl 40(%ebp), %edi
+ movl %esi, 32(%ecx)
+ movl 40(%eax), %esi
+ sbbl %edi, %esi
+ movl 44(%ebp), %edi
+ movl %edx, 36(%ecx)
+ movl 44(%eax), %edx
+ sbbl %edi, %edx
+ movl 48(%ebp), %edi
+ movl %esi, 40(%ecx)
+ movl 48(%eax), %esi
+ sbbl %edi, %esi
+ movl 52(%ebp), %edi
+ movl %edx, 44(%ecx)
+ movl 52(%eax), %edx
+ sbbl %edi, %edx
+ movl 56(%ebp), %edi
+ movl %esi, 48(%ecx)
+ movl 56(%eax), %esi
+ sbbl %edi, %esi
+# high 15 limbs: differences are spilled and only stored after the
+# conditional add-back of p below
+ movl 60(%ebp), %edi
+ movl %edx, 52(%ecx)
+ movl 60(%eax), %edx
+ sbbl %edi, %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl 64(%ebp), %edx
+ movl %esi, 56(%ecx)
+ movl 64(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 68(%ebp), %edx
+ movl 68(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 72(%ebp), %edx
+ movl 72(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 76(%ebp), %edx
+ movl 76(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 80(%ebp), %edx
+ movl 80(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 84(%ebp), %edx
+ movl 84(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 88(%ebp), %edx
+ movl 88(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 92(%ebp), %edx
+ movl 92(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 96(%ebp), %edx
+ movl 96(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 100(%ebp), %edx
+ movl 100(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 104(%ebp), %edx
+ movl 104(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 108(%ebp), %edx
+ movl 108(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 112(%ebp), %edx
+ movl 112(%eax), %esi
+ sbbl %edx, %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 116(%ebp), %edx
+ movl 116(%eax), %eax
+ sbbl %edx, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+# materialize the final borrow of the whole 30-limb subtraction: eax = 0 or 1
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+# esi = p; each branch pair below selects either a limb of p (borrow set)
+# or 0 (no borrow) as the add-back operand for one high limb
+ movl 132(%esp), %esi
+ jne .LBB237_1
+# BB#2:
+ movl $0, 60(%esp) # 4-byte Folded Spill
+ jmp .LBB237_3
+.LBB237_1:
+ movl 56(%esi), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+.LBB237_3:
+ testb %al, %al
+ jne .LBB237_4
+# BB#5:
+ movl $0, 24(%esp) # 4-byte Folded Spill
+ movl $0, %ebx
+ jmp .LBB237_6
+.LBB237_4:
+ movl (%esi), %ebx
+ movl 4(%esi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+.LBB237_6:
+ jne .LBB237_7
+# BB#8:
+ movl $0, 32(%esp) # 4-byte Folded Spill
+ jmp .LBB237_9
+.LBB237_7:
+ movl 52(%esi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+.LBB237_9:
+ jne .LBB237_10
+# BB#11:
+ movl $0, 28(%esp) # 4-byte Folded Spill
+ jmp .LBB237_12
+.LBB237_10:
+ movl 48(%esi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+.LBB237_12:
+ jne .LBB237_13
+# BB#14:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB237_15
+.LBB237_13:
+ movl 44(%esi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB237_15:
+ jne .LBB237_16
+# BB#17:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB237_18
+.LBB237_16:
+ movl 40(%esi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB237_18:
+ jne .LBB237_19
+# BB#20:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB237_21
+.LBB237_19:
+ movl 36(%esi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB237_21:
+ jne .LBB237_22
+# BB#23:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB237_24
+.LBB237_22:
+ movl 32(%esi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB237_24:
+ jne .LBB237_25
+# BB#26:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB237_27
+.LBB237_25:
+ movl 28(%esi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB237_27:
+ jne .LBB237_28
+# BB#29:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB237_30
+.LBB237_28:
+ movl 24(%esi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB237_30:
+ jne .LBB237_31
+# BB#32:
+ movl $0, %edx
+ jmp .LBB237_33
+.LBB237_31:
+ movl 20(%esi), %edx
+.LBB237_33:
+ jne .LBB237_34
+# BB#35:
+ movl $0, %ebp
+ jmp .LBB237_36
+.LBB237_34:
+ movl 16(%esi), %ebp
+.LBB237_36:
+ jne .LBB237_37
+# BB#38:
+ movl $0, %eax
+ jmp .LBB237_39
+.LBB237_37:
+ movl 12(%esi), %eax
+.LBB237_39:
+ jne .LBB237_40
+# BB#41:
+ xorl %esi, %esi
+ jmp .LBB237_42
+.LBB237_40:
+ movl 8(%esi), %esi
+.LBB237_42:
+# add the selected limbs (0 or p) onto the spilled high-half differences
+# with one adc chain, storing z[15..29] as each limb completes
+ addl 44(%esp), %ebx # 4-byte Folded Reload
+ movl 24(%esp), %edi # 4-byte Reload
+ adcl 36(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 60(%ecx)
+ adcl 40(%esp), %esi # 4-byte Folded Reload
+ movl %edi, 64(%ecx)
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 68(%ecx)
+ adcl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 72(%ecx)
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %ebp, 76(%ecx)
+ movl (%esp), %esi # 4-byte Reload
+ adcl 64(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 80(%ecx)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 84(%ecx)
+ movl 8(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 88(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 92(%ecx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 96(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 100(%ecx)
+ movl 28(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 104(%ecx)
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 108(%ecx)
+ movl %eax, 112(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%ecx)
+ addl $100, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end237:
+ .size mcl_fpDbl_sub15L, .Lfunc_end237-mcl_fpDbl_sub15L
+
+ .align 16, 0x90
+ .type .LmulPv512x32,@function
+.LmulPv512x32: # @mulPv512x32
+# Local (non-exported) helper with a CUSTOM register convention, not cdecl:
+#   %ecx = destination buffer (17 limbs written)
+#   %edx = source, 16 little-endian 32-bit limbs (512 bits)
+#   132(%esp) after the prologue = 32-bit scalar multiplier
+# Computes dst[0..16] = src[0..15] * scalar: sixteen single `mull`s spill
+# their lo/hi words, then one adc chain folds each high word into the next
+# low word.  Returns the destination pointer (%ecx) in %eax.
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $112, %esp
+ movl %edx, %ebp
+ movl 132(%esp), %ebx
+ movl %ebx, %eax
+ mull 60(%ebp)
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 56(%ebp)
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 52(%ebp)
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 48(%ebp)
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 44(%ebp)
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 40(%ebp)
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 36(%ebp)
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 32(%ebp)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 28(%ebp)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 24(%ebp)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 20(%ebp)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 16(%ebp)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 12(%ebp)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 8(%ebp)
+ movl %edx, %esi
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 4(%ebp)
+ movl %edx, %edi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull (%ebp)
+ movl %eax, (%ecx)
+# carry propagation: dst[i] = lo[i] + hi[i-1] (+ carry), storing as we go
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 8(%ecx)
+ adcl 8(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ecx)
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%ecx)
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ecx)
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%ecx)
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%ecx)
+# top limb: final high word plus any remaining carry
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 64(%ecx)
+ movl %ecx, %eax
+ addl $112, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end238:
+ .size .LmulPv512x32, .Lfunc_end238-.LmulPv512x32
+
+ .globl mcl_fp_mulUnitPre16L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre16L,@function
+mcl_fp_mulUnitPre16L: # @mcl_fp_mulUnitPre16L
+# void mcl_fp_mulUnitPre16L(uint32_t z[17], const uint32_t x[16], uint32_t y)
+# i386 cdecl wrapper around .LmulPv512x32.  After the prologue the args are:
+#   144(%esp)=z, 148(%esp)=x, 152(%esp)=y
+# Loads the PIC GOT base into %ebx (call/pop idiom), invokes the helper with
+# its custom convention (%ecx = 68-byte stack buffer, %edx = x, stack = y),
+# then copies the 17-limb product out of the buffer into z.
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $124, %esp
+ calll .L239$pb
+.L239$pb:
+ popl %ebx
+.Ltmp50:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp50-.L239$pb), %ebx
+ movl 152(%esp), %eax
+ movl %eax, (%esp)
+ leal 56(%esp), %ecx
+ movl 148(%esp), %edx
+ calll .LmulPv512x32
+# copy the 17 result limbs from the 56(%esp) buffer out to z
+ movl 120(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 116(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 108(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 104(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 84(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 80(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp
+ movl 72(%esp), %ebx
+ movl 68(%esp), %edi
+ movl 64(%esp), %esi
+ movl 56(%esp), %edx
+ movl 60(%esp), %ecx
+ movl 144(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl %ecx, 64(%eax)
+ addl $124, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end239:
+ .size mcl_fp_mulUnitPre16L, .Lfunc_end239-mcl_fp_mulUnitPre16L
+
+ .globl mcl_fpDbl_mulPre16L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre16L,@function
+mcl_fpDbl_mulPre16L: # @mcl_fpDbl_mulPre16L
+# void mcl_fpDbl_mulPre16L(uint32_t z[32], const uint32_t x[16], const uint32_t y[16])
+# i386 cdecl, frame pointer kept: 8(%ebp)=z, 12(%ebp)=x, 16(%ebp)=y.
+# One level of Karatsuba.  Writing x = xH*2^256 + xL and y = yH*2^256 + yL:
+#   z = xL*yL + ((xL+xH)*(yL+yH) - xL*yL - xH*yH)*2^256 + xH*yH*2^512
+# The three 8-limb products are delegated to mcl_fpDbl_mulPre8L.  The carry
+# bits from the half-limb sums are preserved across intervening code with
+# the seto/lahf ... sahf/addb $127,%al idiom (save/restore of EFLAGS).
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $300, %esp # imm = 0x12C
+ calll .L240$pb
+.L240$pb:
+ popl %ebx
+.Ltmp51:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp51-.L240$pb), %ebx
+ movl %ebx, -224(%ebp) # 4-byte Spill
+# z[0..15] = xL * yL (low 8-limb halves)
+ movl 16(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl 12(%ebp), %esi
+ movl %esi, 4(%esp)
+ movl 8(%ebp), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre8L@PLT
+# z[16..31] = xH * yH (high halves live 32 bytes = 8 limbs in)
+ leal 32(%edi), %eax
+ movl %eax, 8(%esp)
+ leal 32(%esi), %eax
+ movl %eax, 4(%esp)
+ movl 8(%ebp), %eax
+ leal 64(%eax), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre8L@PLT
+# form xL + xH (8 limbs); the final carry is captured with seto/lahf below
+ movl 52(%esi), %ebx
+ movl 48(%esi), %eax
+ movl 44(%esi), %ecx
+ movl 40(%esi), %edx
+ movl %edx, -176(%ebp) # 4-byte Spill
+ movl (%esi), %edi
+ movl 4(%esi), %edx
+ addl 32(%esi), %edi
+ movl %edi, -184(%ebp) # 4-byte Spill
+ movl %esi, %edi
+ adcl 36(%edi), %edx
+ movl %edx, -236(%ebp) # 4-byte Spill
+ movl -176(%ebp), %edx # 4-byte Reload
+ adcl 8(%edi), %edx
+ movl %edx, -176(%ebp) # 4-byte Spill
+ adcl 12(%edi), %ecx
+ movl %ecx, -232(%ebp) # 4-byte Spill
+ adcl 16(%edi), %eax
+ movl %eax, -212(%ebp) # 4-byte Spill
+ adcl 20(%edi), %ebx
+ movl %ebx, -228(%ebp) # 4-byte Spill
+ movl 56(%edi), %eax
+ adcl 24(%edi), %eax
+ movl %eax, -248(%ebp) # 4-byte Spill
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %ecx
+ popl %eax
+ movl %ecx, -144(%ebp) # 4-byte Spill
+# form yL + yH (8 limbs); its carry is saved the same way
+ movl 16(%ebp), %esi
+ movl (%esi), %ecx
+ addl 32(%esi), %ecx
+ movl %ecx, -188(%ebp) # 4-byte Spill
+ movl 4(%esi), %ecx
+ adcl 36(%esi), %ecx
+ movl %ecx, -192(%ebp) # 4-byte Spill
+ movl 40(%esi), %ecx
+ adcl 8(%esi), %ecx
+ movl %ecx, -196(%ebp) # 4-byte Spill
+ movl 44(%esi), %ecx
+ adcl 12(%esi), %ecx
+ movl %ecx, -200(%ebp) # 4-byte Spill
+ movl 48(%esi), %ecx
+ adcl 16(%esi), %ecx
+ movl %ecx, -204(%ebp) # 4-byte Spill
+ movl 52(%esi), %ecx
+ adcl 20(%esi), %ecx
+ movl %ecx, -208(%ebp) # 4-byte Spill
+ movl 56(%esi), %edx
+ adcl 24(%esi), %edx
+ movl 60(%esi), %ecx
+ adcl 28(%esi), %ecx
+ pushl %eax
+ seto %al
+ lahf
+ movl %eax, %ebx
+ popl %eax
+ movl %ebx, -252(%ebp) # 4-byte Spill
+# if the x-half sum carried, keep copies of its limbs for the correction
+# term, otherwise zero them (conditional mask of the carry)
+ movl -212(%ebp), %ebx # 4-byte Reload
+ movl -176(%ebp), %esi # 4-byte Reload
+ movl %esi, -216(%ebp) # 4-byte Spill
+ movl -184(%ebp), %esi # 4-byte Reload
+ movl %esi, -220(%ebp) # 4-byte Spill
+ jb .LBB240_2
+# BB#1:
+ xorl %eax, %eax
+ xorl %ebx, %ebx
+ movl $0, -216(%ebp) # 4-byte Folded Spill
+ movl $0, -220(%ebp) # 4-byte Folded Spill
+.LBB240_2:
+ movl %ebx, -244(%ebp) # 4-byte Spill
+ movl %eax, -240(%ebp) # 4-byte Spill
+ movl 60(%edi), %eax
+ movl -144(%ebp), %ebx # 4-byte Reload
+ pushl %eax
+ movl %ebx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl 28(%edi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ movl %ecx, -172(%ebp) # 4-byte Spill
+ movl %edx, -144(%ebp) # 4-byte Spill
+ movl -208(%ebp), %eax # 4-byte Reload
+ movl %eax, -148(%ebp) # 4-byte Spill
+ movl -204(%ebp), %eax # 4-byte Reload
+ movl %eax, -152(%ebp) # 4-byte Spill
+ movl -200(%ebp), %eax # 4-byte Reload
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl -196(%ebp), %eax # 4-byte Reload
+ movl %eax, -160(%ebp) # 4-byte Spill
+ movl -192(%ebp), %eax # 4-byte Reload
+ movl %eax, -164(%ebp) # 4-byte Spill
+ movl -188(%ebp), %eax # 4-byte Reload
+ movl %eax, -168(%ebp) # 4-byte Spill
+ jb .LBB240_4
+# BB#3:
+ movl $0, -172(%ebp) # 4-byte Folded Spill
+ movl $0, -144(%ebp) # 4-byte Folded Spill
+ movl $0, -148(%ebp) # 4-byte Folded Spill
+ movl $0, -152(%ebp) # 4-byte Folded Spill
+ movl $0, -156(%ebp) # 4-byte Folded Spill
+ movl $0, -160(%ebp) # 4-byte Folded Spill
+ movl $0, -164(%ebp) # 4-byte Folded Spill
+ movl $0, -168(%ebp) # 4-byte Folded Spill
+.LBB240_4:
+# lay the two half-sums into stack buffers: (xL+xH) at -108(%ebp),
+# (yL+yH) at -140(%ebp); product will land at -76(%ebp)
+ movl -184(%ebp), %eax # 4-byte Reload
+ movl %eax, -108(%ebp)
+ movl -236(%ebp), %eax # 4-byte Reload
+ movl %eax, -104(%ebp)
+ movl -176(%ebp), %edi # 4-byte Reload
+ movl %edi, -100(%ebp)
+ movl -232(%ebp), %edi # 4-byte Reload
+ movl %edi, -96(%ebp)
+ movl -212(%ebp), %esi # 4-byte Reload
+ movl %esi, -92(%ebp)
+ movl -228(%ebp), %esi # 4-byte Reload
+ movl %esi, -88(%ebp)
+ movl -248(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -84(%ebp)
+ movl -188(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -140(%ebp)
+ movl -192(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -136(%ebp)
+ movl -196(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -132(%ebp)
+ movl -200(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -128(%ebp)
+ movl -204(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -124(%ebp)
+ movl -208(%ebp), %ebx # 4-byte Reload
+ movl %ebx, -120(%ebp)
+ movl %esi, %ebx
+ movl %edi, %esi
+ movl %eax, %edi
+ movl %edx, -116(%ebp)
+ movl %ecx, -112(%ebp)
+ sbbl %edx, %edx
+ movl -180(%ebp), %eax # 4-byte Reload
+ movl %eax, -80(%ebp)
+ movl -252(%ebp), %ecx # 4-byte Reload
+ pushl %eax
+ movl %ecx, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB240_6
+# BB#5:
+ movl $0, %eax
+ movl $0, %ebx
+ movl $0, %esi
+ movl $0, %edi
+.LBB240_6:
+ movl %eax, -180(%ebp) # 4-byte Spill
+ sbbl %eax, %eax
+ leal -140(%ebp), %ecx
+ movl %ecx, 8(%esp)
+ leal -108(%ebp), %ecx
+ movl %ecx, 4(%esp)
+ leal -76(%ebp), %ecx
+ movl %ecx, (%esp)
+# pre-add the carry cross terms into what will become the middle product's
+# high limbs, then compute (xL+xH)*(yL+yH) into the -76(%ebp) buffer
+ andl %eax, %edx
+ movl -220(%ebp), %eax # 4-byte Reload
+ addl %eax, -168(%ebp) # 4-byte Folded Spill
+ adcl %edi, -164(%ebp) # 4-byte Folded Spill
+ movl -216(%ebp), %eax # 4-byte Reload
+ adcl %eax, -160(%ebp) # 4-byte Folded Spill
+ adcl %esi, -156(%ebp) # 4-byte Folded Spill
+ movl -244(%ebp), %eax # 4-byte Reload
+ adcl %eax, -152(%ebp) # 4-byte Folded Spill
+ adcl %ebx, -148(%ebp) # 4-byte Folded Spill
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl -240(%ebp), %eax # 4-byte Folded Reload
+ movl %eax, -144(%ebp) # 4-byte Spill
+ movl -172(%ebp), %edi # 4-byte Reload
+ adcl -180(%ebp), %edi # 4-byte Folded Reload
+ sbbl %esi, %esi
+ andl $1, %esi
+ andl $1, %edx
+ movl %edx, -176(%ebp) # 4-byte Spill
+ movl -224(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre8L@PLT
+# fold the saved correction limbs into the middle product's top half
+ movl -168(%ebp), %eax # 4-byte Reload
+ addl -44(%ebp), %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ movl -164(%ebp), %eax # 4-byte Reload
+ adcl -40(%ebp), %eax
+ movl %eax, -164(%ebp) # 4-byte Spill
+ movl -160(%ebp), %eax # 4-byte Reload
+ adcl -36(%ebp), %eax
+ movl %eax, -160(%ebp) # 4-byte Spill
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl -32(%ebp), %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl -152(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ adcl -16(%ebp), %edi
+ movl %edi, -172(%ebp) # 4-byte Spill
+ adcl %esi, -176(%ebp) # 4-byte Folded Spill
+# middle -= xL*yL (subtract z[0..15] from the middle product)
+ movl -76(%ebp), %eax
+ movl 8(%ebp), %esi
+ subl (%esi), %eax
+ movl %eax, -196(%ebp) # 4-byte Spill
+ movl -72(%ebp), %ecx
+ sbbl 4(%esi), %ecx
+ movl -68(%ebp), %eax
+ sbbl 8(%esi), %eax
+ movl %eax, -192(%ebp) # 4-byte Spill
+ movl -64(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl -60(%ebp), %ebx
+ sbbl 16(%esi), %ebx
+ movl -56(%ebp), %eax
+ sbbl 20(%esi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ movl -52(%ebp), %eax
+ sbbl 24(%esi), %eax
+ movl %eax, -184(%ebp) # 4-byte Spill
+ movl -48(%ebp), %eax
+ sbbl 28(%esi), %eax
+ movl %eax, -188(%ebp) # 4-byte Spill
+ movl 32(%esi), %eax
+ movl %eax, -200(%ebp) # 4-byte Spill
+ sbbl %eax, -168(%ebp) # 4-byte Folded Spill
+ movl 36(%esi), %eax
+ movl %eax, -204(%ebp) # 4-byte Spill
+ sbbl %eax, -164(%ebp) # 4-byte Folded Spill
+ movl 40(%esi), %eax
+ movl %eax, -208(%ebp) # 4-byte Spill
+ sbbl %eax, -160(%ebp) # 4-byte Folded Spill
+ movl 44(%esi), %eax
+ movl %eax, -212(%ebp) # 4-byte Spill
+ sbbl %eax, -156(%ebp) # 4-byte Folded Spill
+ movl 48(%esi), %eax
+ movl %eax, -216(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ movl 52(%esi), %eax
+ movl %eax, -220(%ebp) # 4-byte Spill
+ sbbl %eax, -148(%ebp) # 4-byte Folded Spill
+ movl 56(%esi), %eax
+ movl %eax, -224(%ebp) # 4-byte Spill
+ movl -144(%ebp), %edi # 4-byte Reload
+ sbbl %eax, %edi
+ movl 60(%esi), %eax
+ movl %eax, -228(%ebp) # 4-byte Spill
+ sbbl %eax, -172(%ebp) # 4-byte Folded Spill
+ sbbl $0, -176(%ebp) # 4-byte Folded Spill
+# middle -= xH*yH (subtract z[16..31] as well)
+ movl 64(%esi), %eax
+ movl %eax, -260(%ebp) # 4-byte Spill
+ subl %eax, -196(%ebp) # 4-byte Folded Spill
+ movl 68(%esi), %eax
+ movl %eax, -264(%ebp) # 4-byte Spill
+ sbbl %eax, %ecx
+ movl 72(%esi), %eax
+ movl %eax, -268(%ebp) # 4-byte Spill
+ sbbl %eax, -192(%ebp) # 4-byte Folded Spill
+ movl 76(%esi), %eax
+ movl %eax, -272(%ebp) # 4-byte Spill
+ sbbl %eax, %edx
+ movl 80(%esi), %eax
+ movl %eax, -276(%ebp) # 4-byte Spill
+ sbbl %eax, %ebx
+ movl 84(%esi), %eax
+ movl %eax, -280(%ebp) # 4-byte Spill
+ sbbl %eax, -180(%ebp) # 4-byte Folded Spill
+ movl 88(%esi), %eax
+ movl %eax, -284(%ebp) # 4-byte Spill
+ sbbl %eax, -184(%ebp) # 4-byte Folded Spill
+ movl 92(%esi), %eax
+ movl %eax, -288(%ebp) # 4-byte Spill
+ sbbl %eax, -188(%ebp) # 4-byte Folded Spill
+ movl 96(%esi), %eax
+ movl %eax, -292(%ebp) # 4-byte Spill
+ sbbl %eax, -168(%ebp) # 4-byte Folded Spill
+ movl 100(%esi), %eax
+ movl %eax, -236(%ebp) # 4-byte Spill
+ sbbl %eax, -164(%ebp) # 4-byte Folded Spill
+ movl 104(%esi), %eax
+ movl %eax, -240(%ebp) # 4-byte Spill
+ sbbl %eax, -160(%ebp) # 4-byte Folded Spill
+ movl 108(%esi), %eax
+ movl %eax, -244(%ebp) # 4-byte Spill
+ sbbl %eax, -156(%ebp) # 4-byte Folded Spill
+ movl 112(%esi), %eax
+ movl %eax, -248(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ movl 116(%esi), %eax
+ movl %eax, -252(%ebp) # 4-byte Spill
+ sbbl %eax, -148(%ebp) # 4-byte Folded Spill
+ movl 120(%esi), %eax
+ movl %eax, -232(%ebp) # 4-byte Spill
+ sbbl %eax, %edi
+ movl %edi, -144(%ebp) # 4-byte Spill
+ movl 124(%esi), %eax
+ movl %eax, -256(%ebp) # 4-byte Spill
+ sbbl %eax, -172(%ebp) # 4-byte Folded Spill
+ movl -176(%ebp), %edi # 4-byte Reload
+ sbbl $0, %edi
+# z[8..23] += corrected middle product; then ripple the final carry
+# through z[24..31]
+ movl -196(%ebp), %eax # 4-byte Reload
+ addl -200(%ebp), %eax # 4-byte Folded Reload
+ adcl -204(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 32(%esi)
+ movl -192(%ebp), %eax # 4-byte Reload
+ adcl -208(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 36(%esi)
+ adcl -212(%ebp), %edx # 4-byte Folded Reload
+ movl %eax, 40(%esi)
+ adcl -216(%ebp), %ebx # 4-byte Folded Reload
+ movl %edx, 44(%esi)
+ movl -180(%ebp), %eax # 4-byte Reload
+ adcl -220(%ebp), %eax # 4-byte Folded Reload
+ movl %ebx, 48(%esi)
+ movl -184(%ebp), %ecx # 4-byte Reload
+ adcl -224(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 52(%esi)
+ movl -188(%ebp), %edx # 4-byte Reload
+ adcl -228(%ebp), %edx # 4-byte Folded Reload
+ movl %ecx, 56(%esi)
+ movl -168(%ebp), %eax # 4-byte Reload
+ adcl -260(%ebp), %eax # 4-byte Folded Reload
+ movl %edx, 60(%esi)
+ movl -164(%ebp), %ecx # 4-byte Reload
+ adcl -264(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 64(%esi)
+ movl -160(%ebp), %eax # 4-byte Reload
+ adcl -268(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 68(%esi)
+ movl -156(%ebp), %ecx # 4-byte Reload
+ adcl -272(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 72(%esi)
+ movl -152(%ebp), %eax # 4-byte Reload
+ adcl -276(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 76(%esi)
+ movl -148(%ebp), %ecx # 4-byte Reload
+ adcl -280(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 80(%esi)
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl -284(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 84(%esi)
+ movl -172(%ebp), %ecx # 4-byte Reload
+ adcl -288(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 88(%esi)
+ adcl -292(%ebp), %edi # 4-byte Folded Reload
+ movl %ecx, 92(%esi)
+ movl %edi, 96(%esi)
+ movl -236(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 100(%esi)
+ movl -240(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 104(%esi)
+ movl -244(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 108(%esi)
+ movl -248(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 112(%esi)
+ movl -252(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 116(%esi)
+ movl -232(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 120(%esi)
+ movl -256(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 124(%esi)
+ addl $300, %esp # imm = 0x12C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end240:
+ .size mcl_fpDbl_mulPre16L, .Lfunc_end240-mcl_fpDbl_mulPre16L
+
+ .globl mcl_fpDbl_sqrPre16L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre16L,@function
+mcl_fpDbl_sqrPre16L: # @mcl_fpDbl_sqrPre16L
+# BB#0:
+ pushl %ebp
+ movl %esp, %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $300, %esp # imm = 0x12C
+ calll .L241$pb
+.L241$pb:
+ popl %ebx
+.Ltmp52:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp52-.L241$pb), %ebx
+ movl %ebx, -184(%ebp) # 4-byte Spill
+ movl 12(%ebp), %edi
+ movl %edi, 8(%esp)
+ movl %edi, 4(%esp)
+ movl 8(%ebp), %esi
+ movl %esi, (%esp)
+ calll mcl_fpDbl_mulPre8L@PLT
+ leal 32(%edi), %eax
+ movl %eax, 8(%esp)
+ movl %eax, 4(%esp)
+ leal 64(%esi), %eax
+ movl %eax, (%esp)
+ calll mcl_fpDbl_mulPre8L@PLT
+ movl 52(%edi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ movl 48(%edi), %eax
+ movl 44(%edi), %ebx
+ movl 40(%edi), %esi
+ movl (%edi), %ecx
+ movl 4(%edi), %edx
+ addl 32(%edi), %ecx
+ movl %ecx, -192(%ebp) # 4-byte Spill
+ adcl 36(%edi), %edx
+ movl %edx, -196(%ebp) # 4-byte Spill
+ adcl 8(%edi), %esi
+ movl %esi, -188(%ebp) # 4-byte Spill
+ adcl 12(%edi), %ebx
+ adcl 16(%edi), %eax
+ movl %eax, -208(%ebp) # 4-byte Spill
+ movl -180(%ebp), %eax # 4-byte Reload
+ adcl 20(%edi), %eax
+ movl %eax, -180(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ addl %ecx, %ecx
+ movl %ecx, -164(%ebp) # 4-byte Spill
+ adcl %edx, %edx
+ movl %edx, -160(%ebp) # 4-byte Spill
+ adcl %esi, %esi
+ movl %esi, -156(%ebp) # 4-byte Spill
+ movl %ebx, %edx
+ movl %ebx, %esi
+ adcl %edx, %edx
+ movl %edx, -152(%ebp) # 4-byte Spill
+ movl -208(%ebp), %eax # 4-byte Reload
+ movl %eax, %edx
+ movl %eax, %ebx
+ adcl %edx, %edx
+ movl %edx, -148(%ebp) # 4-byte Spill
+ movl -180(%ebp), %edx # 4-byte Reload
+ adcl %edx, %edx
+ movl %edx, -144(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -172(%ebp) # 4-byte Spill
+ movl 56(%edi), %edx
+ movl -168(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ adcl 24(%edi), %edx
+ movl 60(%edi), %ecx
+ adcl 28(%edi), %ecx
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -200(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %eax
+ movl %eax, -204(%ebp) # 4-byte Spill
+ seto %al
+ lahf
+ movl %eax, %edi
+ sbbl %eax, %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ jb .LBB241_2
+# BB#1:
+ movl $0, -144(%ebp) # 4-byte Folded Spill
+ movl $0, -148(%ebp) # 4-byte Folded Spill
+ movl $0, -152(%ebp) # 4-byte Folded Spill
+ movl $0, -156(%ebp) # 4-byte Folded Spill
+ movl $0, -160(%ebp) # 4-byte Folded Spill
+ movl $0, -164(%ebp) # 4-byte Folded Spill
+.LBB241_2:
+ movl %edx, %eax
+ movl -172(%ebp), %edi # 4-byte Reload
+ pushl %eax
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ adcl %eax, %eax
+ movl %ecx, %edi
+ adcl %edi, %edi
+ movl %edi, -176(%ebp) # 4-byte Spill
+ movl -204(%ebp), %edi # 4-byte Reload
+ pushl %eax
+ movl %edi, %eax
+ addb $127, %al
+ sahf
+ popl %eax
+ jb .LBB241_4
+# BB#3:
+ movl $0, -176(%ebp) # 4-byte Folded Spill
+ xorl %eax, %eax
+.LBB241_4:
+ movl %eax, -172(%ebp) # 4-byte Spill
+ movl -192(%ebp), %eax # 4-byte Reload
+ movl %eax, -108(%ebp)
+ movl %eax, -140(%ebp)
+ movl -196(%ebp), %eax # 4-byte Reload
+ movl %eax, -104(%ebp)
+ movl %eax, -136(%ebp)
+ movl -188(%ebp), %eax # 4-byte Reload
+ movl %eax, -100(%ebp)
+ movl %eax, -132(%ebp)
+ movl %esi, -96(%ebp)
+ movl %esi, -128(%ebp)
+ movl %ebx, -92(%ebp)
+ movl %ebx, -124(%ebp)
+ movl -180(%ebp), %eax # 4-byte Reload
+ movl %eax, -88(%ebp)
+ movl %eax, -120(%ebp)
+ movl %edx, -84(%ebp)
+ movl %edx, -116(%ebp)
+ movl %ecx, -80(%ebp)
+ movl %ecx, -112(%ebp)
+ movl -200(%ebp), %eax # 4-byte Reload
+ movl %eax, %eax
+ addb $127, %al
+ sahf
+ jb .LBB241_5
+# BB#6:
+ xorl %edi, %edi
+ jmp .LBB241_7
+.LBB241_5:
+ shrl $31, %ecx
+ movl %ecx, %edi
+.LBB241_7:
+ leal -140(%ebp), %eax
+ movl %eax, 8(%esp)
+ leal -108(%ebp), %eax
+ movl %eax, 4(%esp)
+ leal -76(%ebp), %eax
+ movl %eax, (%esp)
+ movl -168(%ebp), %esi # 4-byte Reload
+ andl $1, %esi
+ movl -184(%ebp), %ebx # 4-byte Reload
+ calll mcl_fpDbl_mulPre8L@PLT
+ movl -164(%ebp), %eax # 4-byte Reload
+ addl -44(%ebp), %eax
+ movl %eax, -164(%ebp) # 4-byte Spill
+ movl -160(%ebp), %eax # 4-byte Reload
+ adcl -40(%ebp), %eax
+ movl %eax, -160(%ebp) # 4-byte Spill
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl -36(%ebp), %eax
+ movl %eax, -156(%ebp) # 4-byte Spill
+ movl -152(%ebp), %eax # 4-byte Reload
+ adcl -32(%ebp), %eax
+ movl %eax, -152(%ebp) # 4-byte Spill
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -28(%ebp), %eax
+ movl %eax, -148(%ebp) # 4-byte Spill
+ movl -144(%ebp), %eax # 4-byte Reload
+ adcl -24(%ebp), %eax
+ movl %eax, -144(%ebp) # 4-byte Spill
+ movl -172(%ebp), %eax # 4-byte Reload
+ adcl -20(%ebp), %eax
+ movl %eax, -172(%ebp) # 4-byte Spill
+ movl -176(%ebp), %eax # 4-byte Reload
+ adcl -16(%ebp), %eax
+ adcl %edi, %esi
+ movl %esi, -168(%ebp) # 4-byte Spill
+ movl -76(%ebp), %ecx
+ movl 8(%ebp), %esi
+ subl (%esi), %ecx
+ movl %ecx, -180(%ebp) # 4-byte Spill
+ movl -72(%ebp), %edi
+ sbbl 4(%esi), %edi
+ movl -68(%ebp), %edx
+ sbbl 8(%esi), %edx
+ movl %edx, -184(%ebp) # 4-byte Spill
+ movl -64(%ebp), %edx
+ sbbl 12(%esi), %edx
+ movl %edx, -192(%ebp) # 4-byte Spill
+ movl -60(%ebp), %ebx
+ sbbl 16(%esi), %ebx
+ movl %eax, %ecx
+ movl -56(%ebp), %eax
+ sbbl 20(%esi), %eax
+ movl %eax, -196(%ebp) # 4-byte Spill
+ movl -52(%ebp), %edx
+ sbbl 24(%esi), %edx
+ movl %edx, -188(%ebp) # 4-byte Spill
+ movl -48(%ebp), %edx
+ sbbl 28(%esi), %edx
+ movl 32(%esi), %eax
+ movl %eax, -200(%ebp) # 4-byte Spill
+ sbbl %eax, -164(%ebp) # 4-byte Folded Spill
+ movl 36(%esi), %eax
+ movl %eax, -204(%ebp) # 4-byte Spill
+ sbbl %eax, -160(%ebp) # 4-byte Folded Spill
+ movl 40(%esi), %eax
+ movl %eax, -208(%ebp) # 4-byte Spill
+ sbbl %eax, -156(%ebp) # 4-byte Folded Spill
+ movl 44(%esi), %eax
+ movl %eax, -212(%ebp) # 4-byte Spill
+ sbbl %eax, -152(%ebp) # 4-byte Folded Spill
+ movl 48(%esi), %eax
+ movl %eax, -216(%ebp) # 4-byte Spill
+ sbbl %eax, -148(%ebp) # 4-byte Folded Spill
+ movl 52(%esi), %eax
+ movl %eax, -220(%ebp) # 4-byte Spill
+ sbbl %eax, -144(%ebp) # 4-byte Folded Spill
+ movl 56(%esi), %eax
+ movl %eax, -224(%ebp) # 4-byte Spill
+ sbbl %eax, -172(%ebp) # 4-byte Folded Spill
+ movl 60(%esi), %eax
+ movl %eax, -228(%ebp) # 4-byte Spill
+ sbbl %eax, %ecx
+ movl %ecx, -176(%ebp) # 4-byte Spill
+ movl -168(%ebp), %eax # 4-byte Reload
+ sbbl $0, %eax
+ movl 64(%esi), %ecx
+ movl %ecx, -260(%ebp) # 4-byte Spill
+ subl %ecx, -180(%ebp) # 4-byte Folded Spill
+ movl 68(%esi), %ecx
+ movl %ecx, -264(%ebp) # 4-byte Spill
+ sbbl %ecx, %edi
+ movl 72(%esi), %ecx
+ movl %ecx, -268(%ebp) # 4-byte Spill
+ sbbl %ecx, -184(%ebp) # 4-byte Folded Spill
+ movl 76(%esi), %ecx
+ movl %ecx, -272(%ebp) # 4-byte Spill
+ sbbl %ecx, -192(%ebp) # 4-byte Folded Spill
+ movl 80(%esi), %ecx
+ movl %ecx, -276(%ebp) # 4-byte Spill
+ sbbl %ecx, %ebx
+ movl 84(%esi), %ecx
+ movl %ecx, -280(%ebp) # 4-byte Spill
+ sbbl %ecx, -196(%ebp) # 4-byte Folded Spill
+ movl 88(%esi), %ecx
+ movl %ecx, -284(%ebp) # 4-byte Spill
+ sbbl %ecx, -188(%ebp) # 4-byte Folded Spill
+ movl 92(%esi), %ecx
+ movl %ecx, -288(%ebp) # 4-byte Spill
+ sbbl %ecx, %edx
+ movl 96(%esi), %ecx
+ movl %ecx, -292(%ebp) # 4-byte Spill
+ sbbl %ecx, -164(%ebp) # 4-byte Folded Spill
+ movl 100(%esi), %ecx
+ movl %ecx, -232(%ebp) # 4-byte Spill
+ sbbl %ecx, -160(%ebp) # 4-byte Folded Spill
+ movl 104(%esi), %ecx
+ movl %ecx, -236(%ebp) # 4-byte Spill
+ sbbl %ecx, -156(%ebp) # 4-byte Folded Spill
+ movl 108(%esi), %ecx
+ movl %ecx, -240(%ebp) # 4-byte Spill
+ sbbl %ecx, -152(%ebp) # 4-byte Folded Spill
+ movl 112(%esi), %ecx
+ movl %ecx, -244(%ebp) # 4-byte Spill
+ sbbl %ecx, -148(%ebp) # 4-byte Folded Spill
+ movl 116(%esi), %ecx
+ movl %ecx, -248(%ebp) # 4-byte Spill
+ sbbl %ecx, -144(%ebp) # 4-byte Folded Spill
+ movl 120(%esi), %ecx
+ movl %ecx, -252(%ebp) # 4-byte Spill
+ sbbl %ecx, -172(%ebp) # 4-byte Folded Spill
+ movl 124(%esi), %ecx
+ movl %ecx, -256(%ebp) # 4-byte Spill
+ sbbl %ecx, -176(%ebp) # 4-byte Folded Spill
+ sbbl $0, %eax
+ movl %eax, -168(%ebp) # 4-byte Spill
+ movl -180(%ebp), %eax # 4-byte Reload
+ addl -200(%ebp), %eax # 4-byte Folded Reload
+ adcl -204(%ebp), %edi # 4-byte Folded Reload
+ movl %eax, 32(%esi)
+ movl -184(%ebp), %eax # 4-byte Reload
+ adcl -208(%ebp), %eax # 4-byte Folded Reload
+ movl %edi, 36(%esi)
+ movl -192(%ebp), %ecx # 4-byte Reload
+ adcl -212(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 40(%esi)
+ adcl -216(%ebp), %ebx # 4-byte Folded Reload
+ movl %ecx, 44(%esi)
+ movl -196(%ebp), %ecx # 4-byte Reload
+ adcl -220(%ebp), %ecx # 4-byte Folded Reload
+ movl %ebx, 48(%esi)
+ movl -188(%ebp), %eax # 4-byte Reload
+ adcl -224(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ movl %edx, %ecx
+ adcl -228(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 56(%esi)
+ movl -164(%ebp), %eax # 4-byte Reload
+ adcl -260(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 60(%esi)
+ movl -160(%ebp), %ecx # 4-byte Reload
+ adcl -264(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 64(%esi)
+ movl -156(%ebp), %eax # 4-byte Reload
+ adcl -268(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 68(%esi)
+ movl -152(%ebp), %ecx # 4-byte Reload
+ adcl -272(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 72(%esi)
+ movl -148(%ebp), %eax # 4-byte Reload
+ adcl -276(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 76(%esi)
+ movl -144(%ebp), %ecx # 4-byte Reload
+ adcl -280(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 80(%esi)
+ movl -172(%ebp), %eax # 4-byte Reload
+ adcl -284(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 84(%esi)
+ movl -176(%ebp), %ecx # 4-byte Reload
+ adcl -288(%ebp), %ecx # 4-byte Folded Reload
+ movl %eax, 88(%esi)
+ movl -168(%ebp), %eax # 4-byte Reload
+ adcl -292(%ebp), %eax # 4-byte Folded Reload
+ movl %ecx, 92(%esi)
+ movl %eax, 96(%esi)
+ movl -232(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 100(%esi)
+ movl -236(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 104(%esi)
+ movl -240(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 108(%esi)
+ movl -244(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 112(%esi)
+ movl -248(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 116(%esi)
+ movl -252(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 120(%esi)
+ movl -256(%ebp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 124(%esi)
+ addl $300, %esp # imm = 0x12C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end241:
+ .size mcl_fpDbl_sqrPre16L, .Lfunc_end241-mcl_fpDbl_sqrPre16L
+
+ .globl mcl_fp_mont16L
+ .align 16, 0x90
+ .type mcl_fp_mont16L,@function
+mcl_fp_mont16L: # @mcl_fp_mont16L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2428, %esp # imm = 0x97C
+ calll .L242$pb
+.L242$pb:
+ popl %ebx
+.Ltmp53:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp53-.L242$pb), %ebx
+ movl 2460(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 2360(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 2360(%esp), %ebp
+ movl 2364(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 2424(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 2420(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 2416(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 2412(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2408(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 2404(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2400(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2396(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 2392(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 2388(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2384(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 2380(%esp), %edi
+ movl 2376(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2372(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2368(%esp), %esi
+ movl %eax, (%esp)
+ leal 2288(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ addl 2288(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2292(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 2296(%esp), %esi
+ movl %esi, %ebp
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2300(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2304(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2308(%esp), %edi
+ movl %edi, %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2312(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2316(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2320(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2328(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2332(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2336(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2340(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2344(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2348(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2352(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 2456(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 2216(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %edi
+ movl 112(%esp), %ecx # 4-byte Reload
+ addl 2216(%esp), %ecx
+ adcl 2220(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2224(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2228(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2232(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ adcl 2236(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2244(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2248(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2252(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2256(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2260(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2268(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2272(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2276(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 2280(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2144(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %edi
+ addl 2144(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2148(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2152(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2156(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2160(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 2164(%esp), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 2168(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2176(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2180(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2184(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2188(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2192(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2196(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2200(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2204(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 2208(%esp), %esi
+ adcl $0, %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 2072(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 112(%esp), %ecx # 4-byte Reload
+ addl 2072(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2076(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2080(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2084(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2088(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 2092(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2096(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2100(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2104(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 2108(%esp), %ebp
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2112(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 2116(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2120(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2124(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2128(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 2132(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2136(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2000(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %esi
+ movl %esi, %eax
+ movl 112(%esp), %ecx # 4-byte Reload
+ addl 2000(%esp), %ecx
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 2004(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2008(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2012(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 2016(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2020(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2024(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 2028(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 2032(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 2036(%esp), %ebp
+ movl %ebp, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 2040(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 2044(%esp), %edi
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 2048(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 2052(%esp), %ebp
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl 2056(%esp), %esi
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 2060(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 2064(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1928(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 100(%esp), %ecx # 4-byte Reload
+ addl 1928(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1932(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1936(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1940(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1944(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1948(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1952(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1956(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1960(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1964(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1968(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1972(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 1976(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ adcl 1980(%esp), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1984(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1988(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1992(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1856(%esp), %ecx
+ movl 2460(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 1856(%esp), %esi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1860(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1864(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1868(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1872(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1876(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1880(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1884(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1888(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 1892(%esp), %esi
+ adcl 1896(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 1900(%esp), %ebp
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1904(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1908(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1912(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1916(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1920(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1784(%esp), %ecx
+ movl 2452(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 1784(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1792(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 1804(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1808(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1816(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1820(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 1824(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1828(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1832(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1836(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1840(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1844(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1848(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1712(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1712(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1716(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1720(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1724(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1728(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1732(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1736(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1740(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 1744(%esp), %edi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1748(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1752(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1756(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1760(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 1764(%esp), %ebp
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1768(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1772(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1776(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1640(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 1640(%esp), %ecx
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1644(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1648(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1652(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1656(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1660(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1664(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1668(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1672(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1676(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1680(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1684(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 1688(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ adcl 1692(%esp), %esi
+ movl %esi, %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1696(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1700(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1704(%esp), %esi
+ sbbl %eax, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1568(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ movl 80(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1568(%esp), %ebp
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1572(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1576(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1580(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1584(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 1588(%esp), %ebp
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1592(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1596(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1600(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1604(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1608(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1612(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1616(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 1620(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1624(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1628(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl 1632(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1496(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 1496(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1500(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 1504(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1508(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1512(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1516(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1520(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1528(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1532(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1540(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1544(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1548(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1552(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1424(%esp), %ecx
+ movl 2460(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ andl $1, %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ addl 1424(%esp), %eax
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1432(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1436(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1440(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1444(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1448(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1452(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1456(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1460(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1464(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 1472(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1476(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 1480(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1484(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1488(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl 2456(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1352(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 1352(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1360(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1376(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1380(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1384(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1396(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1400(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 1404(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1408(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1412(%esp), %esi
+ adcl 1416(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1280(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %ebp
+ movl %ebp, %eax
+ addl 1280(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1284(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 1288(%esp), %ebp
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1296(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1300(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1304(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1308(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1312(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1316(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1320(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1324(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1328(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1332(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1336(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 1340(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1344(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, %edi
+ movl 2456(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl 2452(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 1208(%esp), %ecx
+ adcl 1212(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1216(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1244(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1248(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1260(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1264(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1272(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1136(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 1136(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1148(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %edi # 4-byte Reload
+ adcl 1164(%esp), %edi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 1188(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1192(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1200(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 1064(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 1064(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1080(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 1088(%esp), %edi
+ movl %edi, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl 1092(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1116(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 992(%esp), %ebp
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 996(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1000(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1004(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1008(%esp), %edi
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 1012(%esp), %ebp
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1016(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ adcl 1020(%esp), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1024(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1028(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 1032(%esp), %esi
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1036(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1040(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1044(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1048(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1052(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1056(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 920(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 920(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 928(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 932(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ adcl 936(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 956(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 968(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 848(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 848(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 856(%esp), %edi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %esi # 4-byte Reload
+ adcl 868(%esp), %esi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 896(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 776(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 776(%esp), %ecx
+ adcl 780(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 784(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 792(%esp), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 800(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 704(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 712(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ adcl 728(%esp), %esi
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 732(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 752(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 92(%esp), %ecx # 4-byte Reload
+ addl 632(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 640(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 652(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl 656(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 664(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 676(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 680(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ movl 92(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 560(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl 576(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 592(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 608(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 612(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 488(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 488(%esp), %ecx
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 500(%esp), %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 508(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 516(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 520(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 536(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 416(%esp), %edi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl 432(%esp), %edi
+ adcl 436(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 440(%esp), %esi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 448(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 116(%esp), %ecx # 4-byte Reload
+ addl 344(%esp), %ecx
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 348(%esp), %ebp
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 356(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 360(%esp), %edi
+ adcl 364(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 2460(%esp), %edx
+ calll .LmulPv512x32
+ movl 116(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 272(%esp), %esi
+ adcl 276(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 280(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 288(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 296(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 308(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 2456(%esp), %eax
+ movl 60(%eax), %eax
+ movl %eax, (%esp)
+ leal 200(%esp), %ecx
+ movl 2452(%esp), %edx
+ calll .LmulPv512x32
+ movl 120(%esp), %ecx # 4-byte Reload
+ addl 200(%esp), %ecx
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 212(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 220(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 232(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 244(%esp), %edi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 2460(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ andl $1, %ebp
+ addl 128(%esp), %esi
+ movl 104(%esp), %ebx # 4-byte Reload
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 132(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 136(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 140(%esp), %ebx
+ movl %ebx, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 148(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 152(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 156(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 160(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 164(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 168(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 172(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 176(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 180(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 184(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 188(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 116(%esp), %edx # 4-byte Reload
+ adcl 192(%esp), %edx
+ movl %edx, 116(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %eax, %edx
+ movl 2460(%esp), %edi
+ subl (%edi), %edx
+ movl %ecx, %eax
+ sbbl 4(%edi), %eax
+ movl %ebx, %ecx
+ sbbl 8(%edi), %ecx
+ movl 112(%esp), %ebx # 4-byte Reload
+ sbbl 12(%edi), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 100(%esp), %ebx # 4-byte Reload
+ sbbl 16(%edi), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 20(%edi), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ sbbl 24(%edi), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ sbbl 28(%edi), %esi
+ movl %esi, 28(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ sbbl 32(%edi), %esi
+ movl %esi, 32(%esp) # 4-byte Spill
+ movl 60(%esp), %esi # 4-byte Reload
+ sbbl 36(%edi), %esi
+ movl %esi, 36(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ sbbl 40(%edi), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ sbbl 44(%edi), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ sbbl 48(%edi), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ sbbl 52(%edi), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ sbbl 56(%edi), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ sbbl 60(%edi), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 124(%esp), %edi # 4-byte Reload
+ sbbl $0, %ebp
+ andl $1, %ebp
+ movl %ebp, %ebx
+ jne .LBB242_2
+# BB#1:
+ movl %edx, %edi
+.LBB242_2:
+ movl 2448(%esp), %edx
+ movl %edi, (%edx)
+ testb %bl, %bl
+ movl 108(%esp), %edi # 4-byte Reload
+ jne .LBB242_4
+# BB#3:
+ movl %eax, %edi
+.LBB242_4:
+ movl %edi, 4(%edx)
+ jne .LBB242_6
+# BB#5:
+ movl %ecx, 104(%esp) # 4-byte Spill
+.LBB242_6:
+ movl 104(%esp), %eax # 4-byte Reload
+ movl %eax, 8(%edx)
+ jne .LBB242_8
+# BB#7:
+ movl 12(%esp), %eax # 4-byte Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+.LBB242_8:
+ movl 112(%esp), %eax # 4-byte Reload
+ movl %eax, 12(%edx)
+ movl 100(%esp), %eax # 4-byte Reload
+ jne .LBB242_10
+# BB#9:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB242_10:
+ movl %eax, 16(%edx)
+ movl 88(%esp), %eax # 4-byte Reload
+ jne .LBB242_12
+# BB#11:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB242_12:
+ movl %eax, 20(%edx)
+ jne .LBB242_14
+# BB#13:
+ movl 24(%esp), %eax # 4-byte Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+.LBB242_14:
+ movl 80(%esp), %eax # 4-byte Reload
+ movl %eax, 24(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ jne .LBB242_16
+# BB#15:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB242_16:
+ movl %eax, 28(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ jne .LBB242_18
+# BB#17:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB242_18:
+ movl %eax, 32(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ jne .LBB242_20
+# BB#19:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB242_20:
+ movl %eax, 36(%edx)
+ movl 68(%esp), %eax # 4-byte Reload
+ jne .LBB242_22
+# BB#21:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB242_22:
+ movl %eax, 40(%edx)
+ movl 76(%esp), %eax # 4-byte Reload
+ jne .LBB242_24
+# BB#23:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB242_24:
+ movl %eax, 44(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ jne .LBB242_26
+# BB#25:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB242_26:
+ movl %eax, 48(%edx)
+ movl 92(%esp), %eax # 4-byte Reload
+ jne .LBB242_28
+# BB#27:
+ movl 52(%esp), %eax # 4-byte Reload
+.LBB242_28:
+ movl %eax, 52(%edx)
+ movl 96(%esp), %eax # 4-byte Reload
+ jne .LBB242_30
+# BB#29:
+ movl 56(%esp), %eax # 4-byte Reload
+.LBB242_30:
+ movl %eax, 56(%edx)
+ movl 116(%esp), %eax # 4-byte Reload
+ jne .LBB242_32
+# BB#31:
+ movl 120(%esp), %eax # 4-byte Reload
+.LBB242_32:
+ movl %eax, 60(%edx)
+ addl $2428, %esp # imm = 0x97C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end242:
+ .size mcl_fp_mont16L, .Lfunc_end242-mcl_fp_mont16L
+
+ .globl mcl_fp_montNF16L
+ .align 16, 0x90
+ .type mcl_fp_montNF16L,@function
+mcl_fp_montNF16L: # @mcl_fp_montNF16L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2412, %esp # imm = 0x96C
+ calll .L243$pb
+.L243$pb:
+ popl %ebx
+.Ltmp54:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp54-.L243$pb), %ebx
+ movl 2444(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 2344(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 2344(%esp), %edi
+ movl 2348(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 2408(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2404(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 2400(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 2396(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2392(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2388(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2384(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 2380(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 2376(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 2372(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 2368(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 2364(%esp), %ebp
+ movl 2360(%esp), %esi
+ movl 2356(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2352(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 2272(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 2272(%esp), %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2276(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2280(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2284(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 2288(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 2292(%esp), %ebp
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2296(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2300(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 2304(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 2308(%esp), %edi
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 2312(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2316(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2320(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2324(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 2328(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2332(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2336(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 2200(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 2264(%esp), %edx
+ movl 108(%esp), %ecx # 4-byte Reload
+ addl 2200(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2204(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2208(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2212(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 2216(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2220(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2224(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 2228(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 2232(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 2236(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2244(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2248(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 2252(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2256(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 2260(%esp), %esi
+ adcl $0, %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2128(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 2128(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2132(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2136(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2140(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2144(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2148(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2152(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %edi # 4-byte Reload
+ adcl 2156(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 2160(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 2164(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2168(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2172(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2176(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2180(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2184(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 2188(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 2192(%esp), %esi
+ movl 2440(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 2056(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 2120(%esp), %eax
+ movl 84(%esp), %edx # 4-byte Reload
+ addl 2056(%esp), %edx
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 2060(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2064(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 2068(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2072(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 2076(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ adcl 2080(%esp), %edi
+ movl %edi, %ebp
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 2084(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 2088(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2092(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2096(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 2100(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 2104(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 2108(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 2112(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ adcl 2116(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1984(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 1984(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1988(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1992(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1996(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2000(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 2004(%esp), %edi
+ adcl 2008(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 2012(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 2016(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2020(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2024(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2028(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2032(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 2036(%esp), %ebp
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 2040(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2044(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2048(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1912(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 1976(%esp), %eax
+ movl 76(%esp), %edx # 4-byte Reload
+ addl 1912(%esp), %edx
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1916(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1920(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1924(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 1928(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 1932(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1936(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1940(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1944(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 1948(%esp), %edi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1952(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1956(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1960(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ adcl 1964(%esp), %esi
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1968(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1972(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1840(%esp), %ecx
+ movl 2444(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ addl 1840(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1844(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1848(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1852(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1856(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1860(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1864(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1868(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1872(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1876(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1880(%esp), %edi
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1884(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1888(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1892(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1896(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1900(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1904(%esp), %esi
+ movl 2440(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1768(%esp), %ecx
+ movl 2436(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv512x32
+ movl 1832(%esp), %edx
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 1768(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1772(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1776(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1780(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1784(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1788(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1792(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1804(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ adcl 1808(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1816(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1820(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1824(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1828(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1696(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 1696(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1700(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1704(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1708(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1712(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1716(%esp), %ebp
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 1720(%esp), %edi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1724(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1728(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1732(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1736(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1740(%esp), %esi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1744(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1748(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1752(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1756(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1760(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1624(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 1688(%esp), %edx
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 1624(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1628(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1632(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1636(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 1640(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 1644(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 1648(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1652(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1656(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1660(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1664(%esp), %esi
+ movl %esi, %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1668(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1672(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1676(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1680(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1684(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1552(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 1552(%esp), %esi
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 1556(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1564(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1568(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1572(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1576(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1580(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1584(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1588(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 1592(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1596(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 1600(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1604(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1608(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1612(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 1616(%esp), %edi
+ movl 2440(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1480(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 1544(%esp), %eax
+ addl 1480(%esp), %esi
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 1484(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 1488(%esp), %edx
+ movl %edx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1492(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 1496(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 1500(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1504(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 1508(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 1512(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 1516(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 1520(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ adcl 1524(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1528(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 1532(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 1536(%esp), %ebp
+ adcl 1540(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl %eax, %edi
+ adcl $0, %edi
+ movl %esi, %eax
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1408(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 1408(%esp), %esi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1412(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %esi # 4-byte Reload
+ adcl 1416(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1432(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1436(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1440(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1444(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1448(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1452(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1456(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1460(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1464(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 1468(%esp), %ebp
+ adcl 1472(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1336(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 1400(%esp), %eax
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 1336(%esp), %ecx
+ adcl 1340(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1344(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 1348(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 1352(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1356(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 1360(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 1364(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 1368(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 1372(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %edx # 4-byte Reload
+ adcl 1376(%esp), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 1380(%esp), %edi
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1384(%esp), %esi
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 1388(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl 1392(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 1396(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1264(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 1264(%esp), %ebp
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1288(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1308(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ adcl 1312(%esp), %esi
+ movl %esi, %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1320(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1324(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1328(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 1192(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 1256(%esp), %eax
+ movl 48(%esp), %ecx # 4-byte Reload
+ addl 1192(%esp), %ecx
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1196(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 1200(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 1204(%esp), %esi
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1208(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 1212(%esp), %edi
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 1216(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 1220(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 1224(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %edx # 4-byte Reload
+ adcl 1228(%esp), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1232(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ adcl 1236(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 1240(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 1244(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 1248(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 1252(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1120(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 1120(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 1132(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 1140(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1144(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1148(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 1176(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 1048(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 1112(%esp), %edx
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl 1048(%esp), %ecx
+ movl 56(%esp), %esi # 4-byte Reload
+ adcl 1052(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 1068(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1072(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 1076(%esp), %ebp
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1080(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1088(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1100(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 976(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 976(%esp), %edi
+ adcl 980(%esp), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 1000(%esp), %edi
+ adcl 1004(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 1008(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 1016(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 904(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 968(%esp), %ecx
+ movl 56(%esp), %eax # 4-byte Reload
+ addl 904(%esp), %eax
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 908(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 912(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 916(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 920(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ adcl 924(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 928(%esp), %edi
+ adcl 932(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 936(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ adcl 940(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 944(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 948(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 952(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 956(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 960(%esp), %ebp
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 964(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl %eax, %esi
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 832(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 832(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 856(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 872(%esp), %esi
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 876(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 888(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 892(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 760(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 824(%esp), %edx
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 760(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 796(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 800(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 808(%esp), %edi
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ adcl 816(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 688(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 688(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 716(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 732(%esp), %ebp
+ adcl 736(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 616(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 680(%esp), %edx
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 616(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 624(%esp), %edi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 640(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 656(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 672(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 544(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 544(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 552(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 560(%esp), %edi
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 564(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 600(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 472(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 536(%esp), %edx
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 472(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 484(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ adcl 488(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %ebp # 4-byte Reload
+ adcl 496(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 400(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 400(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 412(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 420(%esp), %edi
+ adcl 424(%esp), %ebp
+ movl %ebp, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 444(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 328(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 392(%esp), %edx
+ movl 92(%esp), %ecx # 4-byte Reload
+ addl 328(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 336(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 344(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 352(%esp), %esi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 368(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 256(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 256(%esp), %ebp
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 260(%esp), %edi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 268(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 280(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 284(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 2440(%esp), %eax
+ movl 60(%eax), %eax
+ movl %eax, (%esp)
+ leal 184(%esp), %ecx
+ movl 2436(%esp), %edx
+ calll .LmulPv512x32
+ movl 248(%esp), %edx
+ movl %edi, %ecx
+ addl 184(%esp), %ecx
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 188(%esp), %edi
+ adcl 192(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp # 4-byte Reload
+ adcl 196(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 204(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 208(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 112(%esp), %ecx
+ movl 2444(%esp), %edx
+ calll .LmulPv512x32
+ addl 112(%esp), %esi
+ movl %edi, %eax
+ adcl 116(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl 120(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ adcl 124(%esp), %ebp
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 128(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl %ecx, %ebx
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 132(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 136(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 140(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 144(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 48(%esp), %ecx # 4-byte Reload
+ adcl 148(%esp), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 152(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 156(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 160(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 164(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 168(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 172(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 176(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 2444(%esp), %esi
+ subl (%esi), %edx
+ sbbl 4(%esi), %edi
+ movl %ebp, %ecx
+ sbbl 8(%esi), %ecx
+ movl %ebx, %eax
+ sbbl 12(%esi), %eax
+ movl 80(%esp), %ebx # 4-byte Reload
+ sbbl 16(%esi), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 68(%esp), %ebx # 4-byte Reload
+ sbbl 20(%esi), %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 64(%esp), %ebx # 4-byte Reload
+ sbbl 24(%esi), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %ebx # 4-byte Reload
+ sbbl 28(%esi), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 48(%esp), %ebx # 4-byte Reload
+ sbbl 32(%esi), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %ebx # 4-byte Reload
+ sbbl 36(%esi), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %ebx # 4-byte Reload
+ sbbl 40(%esi), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 72(%esp), %ebx # 4-byte Reload
+ sbbl 44(%esi), %ebx
+ movl %ebx, 32(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 48(%esi), %ebx
+ movl %ebx, 36(%esp) # 4-byte Spill
+ movl 96(%esp), %ebx # 4-byte Reload
+ sbbl 52(%esi), %ebx
+ movl %ebx, 40(%esp) # 4-byte Spill
+ movl 92(%esp), %ebx # 4-byte Reload
+ sbbl 56(%esi), %ebx
+ movl %ebx, 44(%esp) # 4-byte Spill
+ movl 104(%esp), %ebx # 4-byte Reload
+ sbbl 60(%esi), %ebx
+ movl %ebx, 84(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ testl %ebx, %ebx
+ js .LBB243_2
+# BB#1:
+ movl %edx, %esi
+.LBB243_2:
+ movl 2432(%esp), %edx
+ movl %esi, (%edx)
+ movl 108(%esp), %esi # 4-byte Reload
+ js .LBB243_4
+# BB#3:
+ movl %edi, %esi
+.LBB243_4:
+ movl %esi, 4(%edx)
+ js .LBB243_6
+# BB#5:
+ movl %ecx, %ebp
+.LBB243_6:
+ movl %ebp, 8(%edx)
+ movl 76(%esp), %ecx # 4-byte Reload
+ js .LBB243_8
+# BB#7:
+ movl %eax, %ecx
+.LBB243_8:
+ movl %ecx, 12(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ js .LBB243_10
+# BB#9:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB243_10:
+ movl %eax, 16(%edx)
+ movl 68(%esp), %eax # 4-byte Reload
+ js .LBB243_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB243_12:
+ movl %eax, 20(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB243_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB243_14:
+ movl %eax, 24(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB243_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB243_16:
+ movl %eax, 28(%edx)
+ movl 48(%esp), %eax # 4-byte Reload
+ js .LBB243_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB243_18:
+ movl %eax, 32(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB243_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB243_20:
+ movl %eax, 36(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB243_22
+# BB#21:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB243_22:
+ movl %eax, 40(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB243_24
+# BB#23:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB243_24:
+ movl %eax, 44(%edx)
+ movl 88(%esp), %eax # 4-byte Reload
+ js .LBB243_26
+# BB#25:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB243_26:
+ movl %eax, 48(%edx)
+ movl 96(%esp), %eax # 4-byte Reload
+ js .LBB243_28
+# BB#27:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB243_28:
+ movl %eax, 52(%edx)
+ movl 92(%esp), %eax # 4-byte Reload
+ js .LBB243_30
+# BB#29:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB243_30:
+ movl %eax, 56(%edx)
+ movl 104(%esp), %eax # 4-byte Reload
+ js .LBB243_32
+# BB#31:
+ movl 84(%esp), %eax # 4-byte Reload
+.LBB243_32:
+ movl %eax, 60(%edx)
+ addl $2412, %esp # imm = 0x96C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end243:
+ .size mcl_fp_montNF16L, .Lfunc_end243-mcl_fp_montNF16L
+
+ .globl mcl_fp_montRed16L
+ .align 16, 0x90
+ .type mcl_fp_montRed16L,@function
+mcl_fp_montRed16L: # @mcl_fp_montRed16L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1356, %esp # imm = 0x54C
+ calll .L244$pb
+.L244$pb:
+ popl %eax
+.Ltmp55:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp55-.L244$pb), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 1384(%esp), %edx
+ movl -4(%edx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1380(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 112(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 120(%esp) # 4-byte Spill
+ imull %eax, %ebx
+ movl 124(%ecx), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 120(%ecx), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 112(%ecx), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 108(%ecx), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 104(%ecx), %esi
+ movl %esi, 152(%esp) # 4-byte Spill
+ movl 100(%ecx), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 96(%ecx), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 92(%ecx), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 84(%ecx), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 80(%ecx), %edi
+ movl %edi, 148(%esp) # 4-byte Spill
+ movl 76(%ecx), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl %esi, 192(%esp) # 4-byte Spill
+ movl 68(%ecx), %edi
+ movl %edi, 204(%esp) # 4-byte Spill
+ movl 64(%ecx), %esi
+ movl %esi, 200(%esp) # 4-byte Spill
+ movl 60(%ecx), %edi
+ movl %edi, 180(%esp) # 4-byte Spill
+ movl 56(%ecx), %edi
+ movl %edi, 164(%esp) # 4-byte Spill
+ movl 52(%ecx), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 40(%ecx), %ebp
+ movl 36(%ecx), %edi
+ movl 32(%ecx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 28(%ecx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 24(%ecx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 20(%ecx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 16(%ecx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 12(%ecx), %esi
+ movl 8(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl (%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 60(%edx), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 56(%edx), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 52(%edx), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 1288(%esp), %ecx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ movl 112(%esp), %eax # 4-byte Reload
+ addl 1288(%esp), %eax
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1300(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1312(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1320(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1324(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ adcl 1328(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ adcl $0, 204(%esp) # 4-byte Folded Spill
+ adcl $0, 192(%esp) # 4-byte Folded Spill
+ movl 196(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ movl 132(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ sbbl %eax, %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1216(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ movl 112(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 1216(%esp), %esi
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 1220(%esp), %edx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1244(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1248(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %esi # 4-byte Reload
+ adcl 1260(%esp), %esi
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 1264(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ adcl $0, 192(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 196(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ movl 156(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 132(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1144(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 1144(%esp), %ebp
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1148(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl 1184(%esp), %esi
+ movl %esi, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 1188(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1200(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1204(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1208(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ adcl $0, 196(%esp) # 4-byte Folded Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ movl 168(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 156(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1072(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 1072(%esp), %esi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1076(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1080(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1088(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ adcl $0, 148(%esp) # 4-byte Folded Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 168(%esp) # 4-byte Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1000(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 1000(%esp), %edi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1004(%esp), %ecx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1044(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ movl 188(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ movl 172(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 928(%esp), %esi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 932(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 936(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ adcl $0, %ebp
+ movl %ebp, 188(%esp) # 4-byte Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 176(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 172(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ movl 144(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ movl 100(%esp), %ebp # 4-byte Reload
+ imull %ebp, %eax
+ movl %eax, (%esp)
+ leal 856(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 856(%esp), %edi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 860(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ movl 176(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 144(%esp) # 4-byte Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull %ebp, %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 784(%esp), %esi
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 788(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %ebp # 4-byte Reload
+ adcl 828(%esp), %ebp
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl %edi, 176(%esp) # 4-byte Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ movl 156(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 712(%esp), %edi
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 716(%esp), %ecx
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ adcl 752(%esp), %ebp
+ movl %ebp, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %edi # 4-byte Reload
+ adcl 756(%esp), %edi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 640(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 640(%esp), %esi
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 644(%esp), %ecx
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %esi # 4-byte Reload
+ adcl 668(%esp), %esi
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ adcl 680(%esp), %edi
+ movl %edi, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 152(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 1384(%esp), %eax
+ movl %eax, %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 568(%esp), %ebp
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 572(%esp), %ecx
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %ebp # 4-byte Reload
+ adcl 588(%esp), %ebp
+ adcl 592(%esp), %esi
+ movl %esi, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %esi # 4-byte Reload
+ adcl 596(%esp), %esi
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 600(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ adcl 632(%esp), %edi
+ movl %edi, 152(%esp) # 4-byte Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 496(%esp), %edi
+ movl 136(%esp), %ecx # 4-byte Reload
+ adcl 500(%esp), %ecx
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 164(%esp), %edi # 4-byte Reload
+ adcl 508(%esp), %edi
+ adcl 512(%esp), %ebp
+ movl %ebp, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ adcl 520(%esp), %esi
+ movl %esi, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 528(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp # 4-byte Reload
+ adcl 532(%esp), %ebp
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 424(%esp), %esi
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ adcl 432(%esp), %edi
+ movl %edi, 164(%esp) # 4-byte Spill
+ movl 180(%esp), %ecx # 4-byte Reload
+ adcl 436(%esp), %ecx
+ movl %ecx, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %ecx # 4-byte Reload
+ adcl 440(%esp), %ecx
+ movl %ecx, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %ecx # 4-byte Reload
+ adcl 444(%esp), %ecx
+ movl %ecx, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %ecx # 4-byte Reload
+ adcl 448(%esp), %ecx
+ movl %ecx, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %ecx # 4-byte Reload
+ adcl 452(%esp), %ecx
+ movl %ecx, 196(%esp) # 4-byte Spill
+ adcl 456(%esp), %ebp
+ movl 184(%esp), %ecx # 4-byte Reload
+ adcl 460(%esp), %ecx
+ movl %ecx, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %ecx # 4-byte Reload
+ adcl 464(%esp), %ecx
+ movl %ecx, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %ecx # 4-byte Reload
+ adcl 468(%esp), %ecx
+ movl %ecx, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %ecx # 4-byte Reload
+ adcl 472(%esp), %ecx
+ movl %ecx, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %ecx # 4-byte Reload
+ adcl 476(%esp), %ecx
+ movl %ecx, 172(%esp) # 4-byte Spill
+ movl 152(%esp), %ecx # 4-byte Reload
+ adcl 480(%esp), %ecx
+ movl %ecx, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %ecx # 4-byte Reload
+ adcl 484(%esp), %ecx
+ movl %ecx, 156(%esp) # 4-byte Spill
+ movl 144(%esp), %ecx # 4-byte Reload
+ adcl 488(%esp), %ecx
+ movl %ecx, 144(%esp) # 4-byte Spill
+ movl 132(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %eax, %esi
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 352(%esp), %esi
+ movl 164(%esp), %esi # 4-byte Reload
+ adcl 356(%esp), %esi
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 364(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ adcl 380(%esp), %ebp
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ adcl 416(%esp), %edi
+ movl %edi, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 280(%esp), %esi
+ movl 180(%esp), %ecx # 4-byte Reload
+ adcl 284(%esp), %ecx
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ adcl 304(%esp), %ebp
+ movl %ebp, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 308(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %esi # 4-byte Reload
+ adcl 316(%esp), %esi
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 344(%esp), %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ movl 124(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 112(%esp) # 4-byte Folded Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %ebp
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 1384(%esp), %edx
+ movl 116(%esp), %ebx # 4-byte Reload
+ calll .LmulPv512x32
+ addl 208(%esp), %ebp
+ movl 200(%esp), %edx # 4-byte Reload
+ adcl 212(%esp), %edx
+ movl %edx, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %ecx # 4-byte Reload
+ adcl 220(%esp), %ecx
+ movl %ecx, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 148(%esp), %ebp # 4-byte Reload
+ adcl 228(%esp), %ebp
+ movl %ebp, 148(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ adcl 240(%esp), %esi
+ movl %esi, 168(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl 272(%esp), %edi
+ movl %edi, 124(%esp) # 4-byte Spill
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ movl %edx, %eax
+ subl 24(%esp), %edx # 4-byte Folded Reload
+ movl 204(%esp), %esi # 4-byte Reload
+ sbbl 12(%esp), %esi # 4-byte Folded Reload
+ sbbl 16(%esp), %ecx # 4-byte Folded Reload
+ movl 196(%esp), %eax # 4-byte Reload
+ sbbl 20(%esp), %eax # 4-byte Folded Reload
+ sbbl 28(%esp), %ebp # 4-byte Folded Reload
+ sbbl 32(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 100(%esp) # 4-byte Spill
+ movl 188(%esp), %ebx # 4-byte Reload
+ sbbl 36(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 104(%esp) # 4-byte Spill
+ movl 168(%esp), %ebx # 4-byte Reload
+ sbbl 40(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 108(%esp) # 4-byte Spill
+ movl 176(%esp), %ebx # 4-byte Reload
+ sbbl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 112(%esp) # 4-byte Spill
+ movl 172(%esp), %ebx # 4-byte Reload
+ sbbl 48(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 116(%esp) # 4-byte Spill
+ movl 152(%esp), %ebx # 4-byte Reload
+ sbbl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 120(%esp) # 4-byte Spill
+ movl 156(%esp), %ebx # 4-byte Reload
+ sbbl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 136(%esp) # 4-byte Spill
+ movl 144(%esp), %ebx # 4-byte Reload
+ sbbl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 140(%esp) # 4-byte Spill
+ movl 132(%esp), %ebx # 4-byte Reload
+ sbbl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 160(%esp) # 4-byte Spill
+ movl 128(%esp), %ebx # 4-byte Reload
+ sbbl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 164(%esp) # 4-byte Spill
+ movl 124(%esp), %ebx # 4-byte Reload
+ sbbl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 180(%esp) # 4-byte Spill
+ sbbl $0, %edi
+ andl $1, %edi
+ movl %edi, %ebx
+ jne .LBB244_2
+# BB#1:
+ movl %edx, 200(%esp) # 4-byte Spill
+.LBB244_2:
+ movl 1376(%esp), %edx
+ movl 200(%esp), %edi # 4-byte Reload
+ movl %edi, (%edx)
+ testb %bl, %bl
+ jne .LBB244_4
+# BB#3:
+ movl %esi, 204(%esp) # 4-byte Spill
+.LBB244_4:
+ movl 204(%esp), %esi # 4-byte Reload
+ movl %esi, 4(%edx)
+ movl 192(%esp), %esi # 4-byte Reload
+ jne .LBB244_6
+# BB#5:
+ movl %ecx, %esi
+.LBB244_6:
+ movl %esi, 8(%edx)
+ movl 196(%esp), %ecx # 4-byte Reload
+ jne .LBB244_8
+# BB#7:
+ movl %eax, %ecx
+.LBB244_8:
+ movl %ecx, 12(%edx)
+ movl 128(%esp), %esi # 4-byte Reload
+ movl 148(%esp), %eax # 4-byte Reload
+ jne .LBB244_10
+# BB#9:
+ movl %ebp, %eax
+.LBB244_10:
+ movl %eax, 16(%edx)
+ movl 124(%esp), %ecx # 4-byte Reload
+ movl 176(%esp), %eax # 4-byte Reload
+ movl 184(%esp), %ebp # 4-byte Reload
+ jne .LBB244_12
+# BB#11:
+ movl 100(%esp), %ebp # 4-byte Reload
+.LBB244_12:
+ movl %ebp, 20(%edx)
+ movl 152(%esp), %ebp # 4-byte Reload
+ movl 188(%esp), %ebx # 4-byte Reload
+ jne .LBB244_14
+# BB#13:
+ movl 104(%esp), %ebx # 4-byte Reload
+.LBB244_14:
+ movl %ebx, 24(%edx)
+ movl 156(%esp), %ebx # 4-byte Reload
+ movl 168(%esp), %edi # 4-byte Reload
+ jne .LBB244_16
+# BB#15:
+ movl 108(%esp), %edi # 4-byte Reload
+.LBB244_16:
+ movl %edi, 28(%edx)
+ movl 144(%esp), %edi # 4-byte Reload
+ jne .LBB244_18
+# BB#17:
+ movl 112(%esp), %eax # 4-byte Reload
+.LBB244_18:
+ movl %eax, 32(%edx)
+ jne .LBB244_20
+# BB#19:
+ movl 116(%esp), %eax # 4-byte Reload
+ movl %eax, 172(%esp) # 4-byte Spill
+.LBB244_20:
+ movl 172(%esp), %eax # 4-byte Reload
+ movl %eax, 36(%edx)
+ jne .LBB244_22
+# BB#21:
+ movl 120(%esp), %ebp # 4-byte Reload
+.LBB244_22:
+ movl %ebp, 40(%edx)
+ movl 132(%esp), %eax # 4-byte Reload
+ jne .LBB244_24
+# BB#23:
+ movl 136(%esp), %ebx # 4-byte Reload
+.LBB244_24:
+ movl %ebx, 44(%edx)
+ jne .LBB244_26
+# BB#25:
+ movl 140(%esp), %edi # 4-byte Reload
+.LBB244_26:
+ movl %edi, 48(%edx)
+ jne .LBB244_28
+# BB#27:
+ movl 160(%esp), %eax # 4-byte Reload
+.LBB244_28:
+ movl %eax, 52(%edx)
+ jne .LBB244_30
+# BB#29:
+ movl 164(%esp), %esi # 4-byte Reload
+.LBB244_30:
+ movl %esi, 56(%edx)
+ jne .LBB244_32
+# BB#31:
+ movl 180(%esp), %ecx # 4-byte Reload
+.LBB244_32:
+ movl %ecx, 60(%edx)
+ addl $1356, %esp # imm = 0x54C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end244:
+ .size mcl_fp_montRed16L, .Lfunc_end244-mcl_fp_montRed16L
+
+ .globl mcl_fp_addPre16L
+ .align 16, 0x90
+ .type mcl_fp_addPre16L,@function
+#-----------------------------------------------------------------------
+# mcl_fp_addPre16L — 16-limb (16 x 32-bit = 512-bit) multi-precision add.
+# C-equivalent (inferred from the stack offsets and store pattern below):
+#   uint32_t mcl_fp_addPre16L(uint32_t z[16], const uint32_t x[16],
+#                             const uint32_t y[16]);
+# After the 3 register pushes: 16(%esp)=z, 20(%esp)=x, 24(%esp)=y.
+# Computes z = x + y limb by limb with an addl/adcl carry chain and
+# returns the final carry-out (0 or 1) in %eax.
+# NOTE: the adcl chain below depends on CF flowing uninterrupted from one
+# add to the next; the interleaved movl loads/stores do not touch flags.
+#-----------------------------------------------------------------------
+mcl_fp_addPre16L: # @mcl_fp_addPre16L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx
+ addl (%ecx), %edx
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %ebx
+ adcl 8(%ecx), %ebx
+ movl 16(%esp), %edi
+ movl %edx, (%edi)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%edi)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %ebx, 8(%edi)
+ movl 20(%eax), %ebx
+ movl %edx, 12(%edi)
+ movl 20(%ecx), %edx
+ adcl %ebx, %edx
+ movl 24(%eax), %ebx
+ movl %esi, 16(%edi)
+ movl 24(%ecx), %esi
+ adcl %ebx, %esi
+ movl 28(%eax), %ebx
+ movl %edx, 20(%edi)
+ movl 28(%ecx), %edx
+ adcl %ebx, %edx
+ movl 32(%eax), %ebx
+ movl %esi, 24(%edi)
+ movl 32(%ecx), %esi
+ adcl %ebx, %esi
+ movl 36(%eax), %ebx
+ movl %edx, 28(%edi)
+ movl 36(%ecx), %edx
+ adcl %ebx, %edx
+ movl 40(%eax), %ebx
+ movl %esi, 32(%edi)
+ movl 40(%ecx), %esi
+ adcl %ebx, %esi
+ movl 44(%eax), %ebx
+ movl %edx, 36(%edi)
+ movl 44(%ecx), %edx
+ adcl %ebx, %edx
+ movl 48(%eax), %ebx
+ movl %esi, 40(%edi)
+ movl 48(%ecx), %esi
+ adcl %ebx, %esi
+ movl 52(%eax), %ebx
+ movl %edx, 44(%edi)
+ movl 52(%ecx), %edx
+ adcl %ebx, %edx
+ movl 56(%eax), %ebx
+ movl %esi, 48(%edi)
+ movl 56(%ecx), %esi
+ adcl %ebx, %esi
+ movl %edx, 52(%edi)
+ movl %esi, 56(%edi)
+ movl 60(%eax), %eax
+ movl 60(%ecx), %ecx
+ adcl %eax, %ecx
+ movl %ecx, 60(%edi)
+# Materialize the final carry: sbbl %eax,%eax gives 0 or -1, then mask to 0/1.
+ sbbl %eax, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end245:
+ .size mcl_fp_addPre16L, .Lfunc_end245-mcl_fp_addPre16L
+
+ .globl mcl_fp_subPre16L
+ .align 16, 0x90
+ .type mcl_fp_subPre16L,@function
+#-----------------------------------------------------------------------
+# mcl_fp_subPre16L — 16-limb (512-bit) multi-precision subtract.
+# C-equivalent (inferred from stack offsets and store pattern):
+#   uint32_t mcl_fp_subPre16L(uint32_t z[16], const uint32_t x[16],
+#                             const uint32_t y[16]);
+# After the 4 register pushes: 20(%esp)=z, 24(%esp)=x, 28(%esp)=y.
+# Computes z = x - y limb by limb with a subl/sbbl borrow chain and
+# returns the final borrow-out (0 or 1) in %eax.
+# %eax is zeroed with xorl BEFORE the chain starts, because xorl clobbers
+# CF and therefore must not appear between the sub/sbb instructions.
+#-----------------------------------------------------------------------
+mcl_fp_subPre16L: # @mcl_fp_subPre16L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax
+ movl 28(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebp
+ sbbl 8(%edx), %ebp
+ movl 20(%esp), %ebx
+ movl %esi, (%ebx)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebx)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebp, 8(%ebx)
+ movl 20(%edx), %ebp
+ movl %esi, 12(%ebx)
+ movl 20(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 24(%edx), %ebp
+ movl %edi, 16(%ebx)
+ movl 24(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 28(%edx), %ebp
+ movl %esi, 20(%ebx)
+ movl 28(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 32(%edx), %ebp
+ movl %edi, 24(%ebx)
+ movl 32(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 36(%edx), %ebp
+ movl %esi, 28(%ebx)
+ movl 36(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 40(%edx), %ebp
+ movl %edi, 32(%ebx)
+ movl 40(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 44(%edx), %ebp
+ movl %esi, 36(%ebx)
+ movl 44(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 48(%edx), %ebp
+ movl %edi, 40(%ebx)
+ movl 48(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 52(%edx), %ebp
+ movl %esi, 44(%ebx)
+ movl 52(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 56(%edx), %ebp
+ movl %edi, 48(%ebx)
+ movl 56(%ecx), %edi
+ sbbl %ebp, %edi
+ movl %esi, 52(%ebx)
+ movl %edi, 56(%ebx)
+ movl 60(%edx), %edx
+ movl 60(%ecx), %ecx
+ sbbl %edx, %ecx
+ movl %ecx, 60(%ebx)
+# Fold the final borrow into the pre-zeroed %eax and mask to 0/1.
+ sbbl $0, %eax
+ andl $1, %eax
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end246:
+ .size mcl_fp_subPre16L, .Lfunc_end246-mcl_fp_subPre16L
+
+ .globl mcl_fp_shr1_16L
+ .align 16, 0x90
+ .type mcl_fp_shr1_16L,@function
+#-----------------------------------------------------------------------
+# mcl_fp_shr1_16L — logical right shift of a 16-limb (512-bit) integer
+# by exactly one bit.
+# C-equivalent (inferred): void mcl_fp_shr1_16L(uint32_t z[16],
+#                                               const uint32_t x[16]);
+# After the single push: 8(%esp)=z (dst), 12(%esp)=x (src).
+# Each limb i is produced with shrdl $1, limb[i+1], limb[i], which shifts
+# the low bit of the next-higher limb into bit 31 of the current limb.
+# The top limb (offset 60) is finished with a plain shrl (zero fill).
+#-----------------------------------------------------------------------
+mcl_fp_shr1_16L: # @mcl_fp_shr1_16L
+# BB#0:
+ pushl %esi
+ movl 12(%esp), %eax
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl 8(%esp), %ecx
+ movl %edx, (%ecx)
+ movl 8(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 4(%ecx)
+ movl 12(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 8(%ecx)
+ movl 16(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 16(%ecx)
+ movl 24(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 24(%ecx)
+ movl 32(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 32(%ecx)
+ movl 40(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 40(%ecx)
+ movl 48(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 44(%ecx)
+ movl 52(%eax), %esi
+ shrdl $1, %esi, %edx
+ movl %edx, 48(%ecx)
+ movl 56(%eax), %edx
+ shrdl $1, %edx, %esi
+ movl %esi, 52(%ecx)
+ movl 60(%eax), %eax
+ shrdl $1, %eax, %edx
+ movl %edx, 56(%ecx)
+# Top limb: plain shift, the vacated bit 31 is filled with zero.
+ shrl %eax
+ movl %eax, 60(%ecx)
+ popl %esi
+ retl
+.Lfunc_end247:
+ .size mcl_fp_shr1_16L, .Lfunc_end247-mcl_fp_shr1_16L
+
+ .globl mcl_fp_add16L
+ .align 16, 0x90
+ .type mcl_fp_add16L,@function
+#-----------------------------------------------------------------------
+# mcl_fp_add16L — 16-limb (512-bit) modular addition.
+# C-equivalent (inferred from offsets/branches):
+#   void mcl_fp_add16L(uint32_t z[16], const uint32_t x[16],
+#                      const uint32_t y[16], const uint32_t p[16]);
+# After 4 pushes + subl $56: 76(%esp)=z, 80(%esp)=x, 84(%esp)=y,
+# 88(%esp)=p (presumably the field modulus — TODO confirm against caller).
+# Phase 1: z = x + y (full adcl chain), carry captured in %ebx (0/1).
+# Phase 2: tentatively compute (x+y) - p with a sbbl chain; the final
+# sbbl $0,%ebx folds the phase-1 carry into the borrow.
+# If the combined result produced no borrow (testb/jne falls through to
+# the %nocarry block), the reduced value overwrites z; otherwise the
+# unreduced sum already stored in phase 1 is left in z.
+#-----------------------------------------------------------------------
+mcl_fp_add16L: # @mcl_fp_add16L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $56, %esp
+ movl 84(%esp), %edx
+ movl (%edx), %esi
+ movl 4(%edx), %ebp
+ movl 80(%esp), %ecx
+ addl (%ecx), %esi
+ movl %esi, %ebx
+ adcl 4(%ecx), %ebp
+ movl 8(%edx), %eax
+ adcl 8(%ecx), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 12(%ecx), %esi
+ movl 16(%ecx), %edi
+ adcl 12(%edx), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ adcl 16(%edx), %edi
+ movl %edi, 12(%esp) # 4-byte Spill
+ movl 20(%ecx), %eax
+ adcl 20(%edx), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 24(%ecx), %eax
+ adcl 24(%edx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 28(%ecx), %eax
+ adcl 28(%edx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ adcl 32(%edx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ adcl 36(%edx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ adcl 40(%edx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ adcl 44(%edx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ adcl 48(%edx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 52(%ecx), %eax
+ adcl 52(%edx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 56(%ecx), %esi
+ adcl 56(%edx), %esi
+ movl 60(%ecx), %ecx
+ adcl 60(%edx), %ecx
+# Store the (possibly unreduced) sum into z before attempting reduction.
+ movl 76(%esp), %edx
+ movl %ebx, (%edx)
+ movl %ebx, %eax
+ movl %ebp, 4(%edx)
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%edx)
+ movl 48(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%edx)
+ movl %edi, 16(%edx)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%edx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%edx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%edx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%edx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%edx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 40(%edx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 44(%edx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 48(%edx)
+ movl 8(%esp), %edi # 4-byte Reload
+ movl %edi, 52(%edx)
+ movl %esi, 56(%edx)
+ movl %ecx, 60(%edx)
+# Capture the addition's carry-out in %ebx (0 or 1).
+ sbbl %ebx, %ebx
+ andl $1, %ebx
+# Tentative subtraction of the modulus p (88(%esp)).
+ movl 88(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ebp
+ movl %ebp, (%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ sbbl 8(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ sbbl 12(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ sbbl 16(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ sbbl 20(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ sbbl 24(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ sbbl 44(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ sbbl 48(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 8(%esp), %eax # 4-byte Reload
+ sbbl 52(%edi), %eax
+ movl %eax, %ebp
+ sbbl 56(%edi), %esi
+ sbbl 60(%edi), %ecx
+# Fold the phase-1 carry into the borrow; bit 0 of %ebx decides reduction.
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB248_2
+# BB#1: # %nocarry
+ movl 4(%esp), %edi # 4-byte Reload
+ movl %edi, (%edx)
+ movl (%esp), %edi # 4-byte Reload
+ movl %edi, 4(%edx)
+ movl 52(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%edx)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%edx)
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%edx)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%edx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%edx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%edx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%edx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%edx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 40(%edx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 44(%edx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 48(%edx)
+ movl %ebp, 52(%edx)
+ movl %esi, 56(%edx)
+ movl %ecx, 60(%edx)
+.LBB248_2: # %carry
+ addl $56, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end248:
+ .size mcl_fp_add16L, .Lfunc_end248-mcl_fp_add16L
+
+ .globl mcl_fp_addNF16L
+ .align 16, 0x90
+ .type mcl_fp_addNF16L,@function
+#-----------------------------------------------------------------------
+# mcl_fp_addNF16L — 16-limb (512-bit) modular addition, "NF" variant.
+# C-equivalent (inferred from offsets):
+#   void mcl_fp_addNF16L(uint32_t z[16], const uint32_t x[16],
+#                        const uint32_t y[16], const uint32_t p[16]);
+# After 4 pushes + subl $124: 144(%esp)=z, 148(%esp)=x, 152(%esp)=y,
+# 156(%esp)=p.
+# Phase 1: sum = x + y (adcl chain, all 16 limbs spilled to the stack).
+# Phase 2: diff = sum - p (sbbl chain, spilled separately).
+# Phase 3: the sign of the top diff limb (testl/js on the last sbbl
+# result) selects, limb by limb via js-guarded moves, whether sum or
+# diff is written to z. Unlike mcl_fp_add16L there is no carry-out path;
+# presumably callers guarantee x+y fits in 512 bits — TODO confirm.
+#-----------------------------------------------------------------------
+mcl_fp_addNF16L: # @mcl_fp_addNF16L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $124, %esp
+ movl 152(%esp), %edx
+ movl (%edx), %eax
+ movl 4(%edx), %ecx
+ movl 148(%esp), %esi
+ addl (%esi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 4(%esi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 60(%edx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 56(%edx), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 52(%edx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 48(%edx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 44(%edx), %edi
+ movl 40(%edx), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 36(%edx), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ movl 20(%edx), %ebp
+ movl 16(%edx), %ebx
+ movl 12(%edx), %ecx
+ movl 8(%edx), %edx
+ adcl 8(%esi), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ adcl 12(%esi), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 16(%esi), %ebx
+ movl %ebx, 68(%esp) # 4-byte Spill
+ adcl 20(%esi), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ adcl 24(%esi), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 28(%esi), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 32(%esi), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 36(%esi), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esi), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 44(%esi), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 48(%esi), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 52(%esi), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 56(%esi), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 60(%esi), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+# Phase 2: tentative subtraction of the modulus p (156(%esp)).
+ movl 156(%esp), %edi
+ movl 80(%esp), %esi # 4-byte Reload
+ subl (%edi), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ sbbl 4(%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 8(%edi), %edx
+ movl %edx, 4(%esp) # 4-byte Spill
+ sbbl 12(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ sbbl 16(%edi), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ sbbl 20(%edi), %ebp
+ movl %ebp, 16(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ sbbl 24(%edi), %ebp
+ movl %ebp, 20(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ movl %eax, %ecx
+ sbbl 44(%edi), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ sbbl 48(%edi), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 52(%edi), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ movl %ecx, %ebx
+ sbbl 56(%edi), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 112(%esp), %ebx # 4-byte Reload
+ sbbl 60(%edi), %ebx
+ movl 80(%esp), %edi # 4-byte Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+# Phase 3: sign of the top diff limb decides sum vs diff per limb.
+ testl %ebx, %ebx
+ js .LBB249_2
+# BB#1:
+ movl %esi, %edi
+.LBB249_2:
+ movl 144(%esp), %ebx
+ movl %edi, (%ebx)
+ movl 84(%esp), %edx # 4-byte Reload
+ js .LBB249_4
+# BB#3:
+ movl (%esp), %edx # 4-byte Reload
+.LBB249_4:
+ movl %edx, 4(%ebx)
+ movl 68(%esp), %edx # 4-byte Reload
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB249_6
+# BB#5:
+ movl 4(%esp), %eax # 4-byte Reload
+.LBB249_6:
+ movl %eax, 8(%ebx)
+ movl 100(%esp), %eax # 4-byte Reload
+ movl 88(%esp), %ecx # 4-byte Reload
+ movl 64(%esp), %esi # 4-byte Reload
+ js .LBB249_8
+# BB#7:
+ movl 8(%esp), %esi # 4-byte Reload
+.LBB249_8:
+ movl %esi, 12(%ebx)
+ movl 108(%esp), %esi # 4-byte Reload
+ js .LBB249_10
+# BB#9:
+ movl 12(%esp), %edx # 4-byte Reload
+.LBB249_10:
+ movl %edx, 16(%ebx)
+ movl 112(%esp), %edi # 4-byte Reload
+ movl 104(%esp), %ebp # 4-byte Reload
+ js .LBB249_12
+# BB#11:
+ movl 16(%esp), %edx # 4-byte Reload
+ movl %edx, 72(%esp) # 4-byte Spill
+.LBB249_12:
+ movl 72(%esp), %edx # 4-byte Reload
+ movl %edx, 20(%ebx)
+ js .LBB249_14
+# BB#13:
+ movl 20(%esp), %ecx # 4-byte Reload
+.LBB249_14:
+ movl %ecx, 24(%ebx)
+ js .LBB249_16
+# BB#15:
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 116(%esp) # 4-byte Spill
+.LBB249_16:
+ movl 116(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%ebx)
+ js .LBB249_18
+# BB#17:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB249_18:
+ movl %eax, 32(%ebx)
+ movl 96(%esp), %ecx # 4-byte Reload
+ js .LBB249_20
+# BB#19:
+ movl 32(%esp), %eax # 4-byte Reload
+ movl %eax, 120(%esp) # 4-byte Spill
+.LBB249_20:
+ movl 120(%esp), %eax # 4-byte Reload
+ movl %eax, 36(%ebx)
+ js .LBB249_22
+# BB#21:
+ movl 36(%esp), %ebp # 4-byte Reload
+.LBB249_22:
+ movl %ebp, 40(%ebx)
+ movl 76(%esp), %eax # 4-byte Reload
+ js .LBB249_24
+# BB#23:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB249_24:
+ movl %eax, 44(%ebx)
+ movl 92(%esp), %eax # 4-byte Reload
+ js .LBB249_26
+# BB#25:
+ movl 44(%esp), %esi # 4-byte Reload
+.LBB249_26:
+ movl %esi, 48(%ebx)
+ js .LBB249_28
+# BB#27:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB249_28:
+ movl %eax, 52(%ebx)
+ js .LBB249_30
+# BB#29:
+ movl 52(%esp), %ecx # 4-byte Reload
+.LBB249_30:
+ movl %ecx, 56(%ebx)
+ js .LBB249_32
+# BB#31:
+ movl 56(%esp), %edi # 4-byte Reload
+.LBB249_32:
+ movl %edi, 60(%ebx)
+ addl $124, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end249:
+ .size mcl_fp_addNF16L, .Lfunc_end249-mcl_fp_addNF16L
+
+ .globl mcl_fp_sub16L
+ .align 16, 0x90
+ .type mcl_fp_sub16L,@function
+#-----------------------------------------------------------------------
+# mcl_fp_sub16L — 16-limb (512-bit) modular subtraction.
+# C-equivalent (inferred from offsets/branches):
+#   void mcl_fp_sub16L(uint32_t z[16], const uint32_t x[16],
+#                      const uint32_t y[16], const uint32_t p[16]);
+# After 4 pushes + subl $60: 80(%esp)=z, 84(%esp)=x, 88(%esp)=y,
+# 92(%esp)=p (read only on the borrow path).
+# Phase 1: z = x - y (sbbl chain), with the final borrow captured in
+# %ebx via sbbl $0,%ebx on the pre-zeroed register.
+# Phase 2: the raw difference is always stored to z; if a borrow
+# occurred (je falls through to the %carry block), p is added back with
+# an adcl chain, restoring the result into range.
+#-----------------------------------------------------------------------
+mcl_fp_sub16L: # @mcl_fp_sub16L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+ movl 84(%esp), %esi
+ movl (%esi), %eax
+ movl 4(%esi), %ecx
+ xorl %ebx, %ebx
+ movl 88(%esp), %edi
+ subl (%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ sbbl 8(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 12(%esi), %eax
+ sbbl 12(%edi), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 16(%esi), %eax
+ sbbl 16(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 20(%esi), %eax
+ sbbl 20(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 24(%esi), %eax
+ sbbl 24(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 28(%esi), %eax
+ sbbl 28(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 32(%esi), %eax
+ sbbl 32(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 36(%esi), %eax
+ sbbl 36(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 40(%esi), %eax
+ sbbl 40(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 44(%esi), %edx
+ sbbl 44(%edi), %edx
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 48(%esi), %ecx
+ sbbl 48(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 52(%esi), %eax
+ sbbl 52(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 56(%esi), %ebp
+ sbbl 56(%edi), %ebp
+ movl 60(%esi), %esi
+ sbbl 60(%edi), %esi
+# Fold the borrow into the pre-zeroed %ebx; bit 0 decides the add-back.
+ sbbl $0, %ebx
+ testb $1, %bl
+# Store the raw difference to z unconditionally.
+ movl 80(%esp), %ebx
+ movl 52(%esp), %edi # 4-byte Reload
+ movl %edi, (%ebx)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%ebx)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%ebx)
+ movl 56(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%ebx)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%ebx)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%ebx)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%ebx)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%ebx)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%ebx)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%ebx)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 40(%ebx)
+ movl %edx, 44(%ebx)
+ movl %ecx, 48(%ebx)
+ movl %eax, 52(%ebx)
+ movl %ebp, 56(%ebx)
+ movl %esi, 60(%ebx)
+ je .LBB250_2
+# BB#1: # %carry
+ movl %esi, (%esp) # 4-byte Spill
+ movl 92(%esp), %esi
+ movl 52(%esp), %ecx # 4-byte Reload
+ addl (%esi), %ecx
+ movl %ecx, (%ebx)
+ movl 16(%esp), %edx # 4-byte Reload
+ adcl 4(%esi), %edx
+ movl %edx, 4(%ebx)
+ movl 44(%esp), %edi # 4-byte Reload
+ adcl 8(%esi), %edi
+ movl 12(%esi), %eax
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %edi, 8(%ebx)
+ movl 16(%esi), %ecx
+ adcl 48(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 12(%ebx)
+ movl 20(%esi), %eax
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 16(%ebx)
+ movl 24(%esi), %ecx
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 20(%ebx)
+ movl 28(%esi), %eax
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 24(%ebx)
+ movl 32(%esi), %ecx
+ adcl 28(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 28(%ebx)
+ movl 36(%esi), %eax
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 32(%ebx)
+ movl 40(%esi), %ecx
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 36(%ebx)
+ movl 44(%esi), %eax
+ adcl 12(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 40(%ebx)
+ movl 48(%esi), %ecx
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 44(%ebx)
+ movl 52(%esi), %eax
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 48(%ebx)
+ movl %eax, 52(%ebx)
+ movl 56(%esi), %eax
+ adcl %ebp, %eax
+ movl %eax, 56(%ebx)
+ movl 60(%esi), %eax
+ adcl (%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%ebx)
+.LBB250_2: # %nocarry
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end250:
+ .size mcl_fp_sub16L, .Lfunc_end250-mcl_fp_sub16L
+
+ .globl mcl_fp_subNF16L
+ .align 16, 0x90
+ .type mcl_fp_subNF16L,@function
+#-----------------------------------------------------------------------
+# mcl_fp_subNF16L — 16-limb (512-bit) modular subtraction, branchless
+# ("NF") variant.
+# C-equivalent (inferred from offsets):
+#   void mcl_fp_subNF16L(uint32_t z[16], const uint32_t x[16],
+#                        const uint32_t y[16], const uint32_t p[16]);
+# After 4 pushes + subl $104: 124(%esp)=z, 128(%esp)=x, 132(%esp)=y,
+# 136(%esp)=p.
+# Phase 1: diff = x - y (sbbl chain, limbs spilled).
+# Phase 2: sarl $31 of the top diff limb builds an all-ones/all-zero
+# mask from the sign, each limb of p is ANDed with that mask, and the
+# masked modulus is added back with an adcl chain — i.e. p is added
+# exactly when the subtraction went negative, with no data-dependent
+# branch.
+#-----------------------------------------------------------------------
+mcl_fp_subNF16L: # @mcl_fp_subNF16L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $104, %esp
+ movl 128(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edx
+ movl 132(%esp), %edi
+ subl (%edi), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ sbbl 4(%edi), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 60(%ecx), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 56(%ecx), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 52(%ecx), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 48(%ecx), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 44(%ecx), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 36(%ecx), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 28(%ecx), %ebp
+ movl 24(%ecx), %ebx
+ movl 20(%ecx), %esi
+ movl 16(%ecx), %edx
+ movl 12(%ecx), %eax
+ movl 8(%ecx), %ecx
+ sbbl 8(%edi), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ sbbl 12(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl 16(%edi), %edx
+ movl %edx, 44(%esp) # 4-byte Spill
+ sbbl 20(%edi), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ sbbl 24(%edi), %ebx
+ movl %ebx, 52(%esp) # 4-byte Spill
+ sbbl 28(%edi), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ sbbl 36(%edi), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ sbbl 44(%edi), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ sbbl 48(%edi), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ sbbl 52(%edi), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ sbbl 56(%edi), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ sbbl 60(%edi), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+# Build the sign mask: %eax = top limb >> 31 (arithmetic) = 0 or -1.
+ sarl $31, %eax
+ movl 136(%esp), %esi
+ movl 60(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 56(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 52(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 48(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 44(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 40(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 36(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 32(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 28(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl 24(%esi), %ecx
+ andl %eax, %ecx
+ movl %ecx, (%esp) # 4-byte Spill
+ movl 20(%esi), %ebp
+ andl %eax, %ebp
+ movl 16(%esi), %ebx
+ andl %eax, %ebx
+ movl 12(%esi), %edi
+ andl %eax, %edi
+ movl 8(%esi), %edx
+ andl %eax, %edx
+ movl 4(%esi), %ecx
+ andl %eax, %ecx
+ andl (%esi), %eax
+# Add the masked modulus to the raw difference and store to z.
+ addl 64(%esp), %eax # 4-byte Folded Reload
+ adcl 68(%esp), %ecx # 4-byte Folded Reload
+ movl 124(%esp), %esi
+ movl %eax, (%esi)
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 4(%esi)
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edx, 8(%esi)
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, 12(%esi)
+ adcl 48(%esp), %ebp # 4-byte Folded Reload
+ movl %ebx, 16(%esi)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %ebp, 20(%esi)
+ movl 4(%esp), %ecx # 4-byte Reload
+ adcl 56(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 24(%esi)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 28(%esi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ adcl 100(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 32(%esi)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 36(%esi)
+ movl 20(%esp), %ecx # 4-byte Reload
+ adcl 84(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 40(%esi)
+ movl 24(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 44(%esi)
+ movl 28(%esp), %ecx # 4-byte Reload
+ adcl 92(%esp), %ecx # 4-byte Folded Reload
+ movl %eax, 48(%esi)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %ecx, 52(%esi)
+ movl %eax, 56(%esi)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esi)
+ addl $104, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end251:
+ .size mcl_fp_subNF16L, .Lfunc_end251-mcl_fp_subNF16L
+
+ .globl mcl_fpDbl_add16L
+ .align 16, 0x90
+ .type mcl_fpDbl_add16L,@function
+#-----------------------------------------------------------------------
+# mcl_fpDbl_add16L — 32-limb (1024-bit, double-width) addition with
+# modular reduction of the HIGH 16 limbs only.
+# C-equivalent (inferred from offsets/branches):
+#   void mcl_fpDbl_add16L(uint32_t z[32], const uint32_t x[32],
+#                         const uint32_t y[32], const uint32_t p[16]);
+# After 4 pushes + subl $116: 136(%esp)=z, 140(%esp)=x, 144(%esp)=y,
+# 148(%esp)=p.
+# Phase 1: full 32-limb adcl chain; the low 16 limbs (offsets 0..60)
+# are stored to z directly, the high 16 (offsets 64..124) are kept in
+# spill slots, and the final carry is captured in %ecx.
+# Phase 2: tentatively subtract p from the high half (sbbl chain);
+# sbbl $0,%ecx folds in the phase-1 carry.
+# Phase 3: per-limb jne-guarded selects write either the reduced or the
+# unreduced high half to z[16..31], keyed off the combined borrow flag.
+#-----------------------------------------------------------------------
+mcl_fpDbl_add16L: # @mcl_fpDbl_add16L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $116, %esp
+ movl 144(%esp), %ecx
+ movl (%ecx), %esi
+ movl 4(%ecx), %edx
+ movl 140(%esp), %ebx
+ addl (%ebx), %esi
+ adcl 4(%ebx), %edx
+ movl 8(%ecx), %edi
+ adcl 8(%ebx), %edi
+ movl 12(%ebx), %ebp
+ movl 136(%esp), %eax
+ movl %esi, (%eax)
+ movl 16(%ebx), %esi
+ adcl 12(%ecx), %ebp
+ adcl 16(%ecx), %esi
+ movl %edx, 4(%eax)
+ movl 72(%ecx), %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl %edi, 8(%eax)
+ movl 20(%ecx), %edx
+ movl %ebp, 12(%eax)
+ movl 20(%ebx), %edi
+ adcl %edx, %edi
+ movl 24(%ecx), %edx
+ movl %esi, 16(%eax)
+ movl 24(%ebx), %esi
+ adcl %edx, %esi
+ movl 28(%ecx), %edx
+ movl %edi, 20(%eax)
+ movl 28(%ebx), %edi
+ adcl %edx, %edi
+ movl 32(%ecx), %edx
+ movl %esi, 24(%eax)
+ movl 32(%ebx), %esi
+ adcl %edx, %esi
+ movl 36(%ecx), %edx
+ movl %edi, 28(%eax)
+ movl 36(%ebx), %edi
+ adcl %edx, %edi
+ movl 40(%ecx), %edx
+ movl %esi, 32(%eax)
+ movl 40(%ebx), %esi
+ adcl %edx, %esi
+ movl 44(%ecx), %edx
+ movl %edi, 36(%eax)
+ movl 44(%ebx), %edi
+ adcl %edx, %edi
+ movl 48(%ecx), %edx
+ movl %esi, 40(%eax)
+ movl 48(%ebx), %esi
+ adcl %edx, %esi
+ movl 52(%ecx), %edx
+ movl %edi, 44(%eax)
+ movl 52(%ebx), %edi
+ adcl %edx, %edi
+ movl 56(%ecx), %edx
+ movl %esi, 48(%eax)
+ movl 56(%ebx), %esi
+ adcl %edx, %esi
+ movl 60(%ecx), %edx
+ movl %edi, 52(%eax)
+ movl 60(%ebx), %ebp
+ adcl %edx, %ebp
+# High half (limbs 16..31): sums are spilled, not yet stored to z.
+ movl 64(%ecx), %edx
+ movl %esi, 56(%eax)
+ movl 64(%ebx), %esi
+ adcl %edx, %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 68(%ecx), %edx
+ movl %ebp, 60(%eax)
+ movl 68(%ebx), %eax
+ adcl %edx, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 72(%ebx), %eax
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 76(%ecx), %ebp
+ movl 76(%ebx), %eax
+ adcl %ebp, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%ecx), %ebp
+ movl 80(%ebx), %eax
+ adcl %ebp, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 84(%ecx), %ebp
+ movl 84(%ebx), %eax
+ adcl %ebp, %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 88(%ecx), %ebp
+ movl 88(%ebx), %eax
+ adcl %ebp, %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 92(%ecx), %ebp
+ movl 92(%ebx), %eax
+ adcl %ebp, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 96(%ecx), %ebp
+ movl 96(%ebx), %eax
+ adcl %ebp, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 100(%ecx), %ebp
+ movl 100(%ebx), %edx
+ adcl %ebp, %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl 104(%ecx), %ebp
+ movl 104(%ebx), %edx
+ adcl %ebp, %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 108(%ecx), %ebp
+ movl 108(%ebx), %edx
+ adcl %ebp, %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 112(%ecx), %edx
+ movl 112(%ebx), %ebp
+ adcl %edx, %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 116(%ecx), %edx
+ movl 116(%ebx), %esi
+ adcl %edx, %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 120(%ecx), %edx
+ movl 120(%ebx), %edi
+ adcl %edx, %edi
+ movl 124(%ecx), %ecx
+ movl 124(%ebx), %esi
+ adcl %ecx, %esi
+# Capture the overall carry-out of the 32-limb add in %ecx (0/1).
+ sbbl %ecx, %ecx
+ andl $1, %ecx
+# Tentative subtraction of p (148(%esp)) from the high half.
+ movl 148(%esp), %edx
+ movl 72(%esp), %ebx # 4-byte Reload
+ subl (%edx), %ebx
+ movl %ebx, (%esp) # 4-byte Spill
+ movl 76(%esp), %ebx # 4-byte Reload
+ sbbl 4(%edx), %ebx
+ movl %ebx, 4(%esp) # 4-byte Spill
+ movl 80(%esp), %ebx # 4-byte Reload
+ sbbl 8(%edx), %ebx
+ movl %ebx, 8(%esp) # 4-byte Spill
+ movl 84(%esp), %ebx # 4-byte Reload
+ sbbl 12(%edx), %ebx
+ movl %ebx, 12(%esp) # 4-byte Spill
+ movl 104(%esp), %ebx # 4-byte Reload
+ sbbl 16(%edx), %ebx
+ movl %ebx, 16(%esp) # 4-byte Spill
+ movl 88(%esp), %ebx # 4-byte Reload
+ sbbl 20(%edx), %ebx
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 108(%esp), %ebx # 4-byte Reload
+ sbbl 24(%edx), %ebx
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 92(%esp), %ebx # 4-byte Reload
+ sbbl 28(%edx), %ebx
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl %eax, %ebx
+ sbbl 32(%edx), %ebx
+ movl 112(%esp), %eax # 4-byte Reload
+ sbbl 36(%edx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ sbbl 40(%edx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ sbbl 44(%edx), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ sbbl 48(%edx), %ebp
+ movl %ebp, 44(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ movl %eax, %ebp
+ sbbl 52(%edx), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl %edi, %ebp
+ sbbl 56(%edx), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl %esi, %ebp
+ sbbl 60(%edx), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+# Fold the phase-1 carry into the borrow; %cl selects per-limb below.
+ sbbl $0, %ecx
+ andl $1, %ecx
+ jne .LBB252_2
+# BB#1:
+ movl %ebx, 64(%esp) # 4-byte Spill
+.LBB252_2:
+ testb %cl, %cl
+ movl 72(%esp), %ecx # 4-byte Reload
+ jne .LBB252_4
+# BB#3:
+ movl (%esp), %ecx # 4-byte Reload
+.LBB252_4:
+ movl 136(%esp), %ebx
+ movl %ecx, 64(%ebx)
+ movl %esi, %ebp
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ movl 92(%esp), %ecx # 4-byte Reload
+ movl 88(%esp), %edx # 4-byte Reload
+ movl 76(%esp), %esi # 4-byte Reload
+ jne .LBB252_6
+# BB#5:
+ movl 4(%esp), %esi # 4-byte Reload
+.LBB252_6:
+ movl %esi, 68(%ebx)
+ movl 84(%esp), %esi # 4-byte Reload
+ movl 80(%esp), %eax # 4-byte Reload
+ jne .LBB252_8
+# BB#7:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB252_8:
+ movl %eax, 72(%ebx)
+ movl 60(%esp), %eax # 4-byte Reload
+ jne .LBB252_10
+# BB#9:
+ movl 12(%esp), %esi # 4-byte Reload
+.LBB252_10:
+ movl %esi, 76(%ebx)
+ jne .LBB252_12
+# BB#11:
+ movl 16(%esp), %esi # 4-byte Reload
+ movl %esi, 104(%esp) # 4-byte Spill
+.LBB252_12:
+ movl 104(%esp), %esi # 4-byte Reload
+ movl %esi, 80(%ebx)
+ jne .LBB252_14
+# BB#13:
+ movl 20(%esp), %edx # 4-byte Reload
+.LBB252_14:
+ movl %edx, 84(%ebx)
+ jne .LBB252_16
+# BB#15:
+ movl 24(%esp), %edx # 4-byte Reload
+ movl %edx, 108(%esp) # 4-byte Spill
+.LBB252_16:
+ movl 108(%esp), %edx # 4-byte Reload
+ movl %edx, 88(%ebx)
+ jne .LBB252_18
+# BB#17:
+ movl 28(%esp), %ecx # 4-byte Reload
+.LBB252_18:
+ movl %ecx, 92(%ebx)
+ movl 64(%esp), %ecx # 4-byte Reload
+ movl %ecx, 96(%ebx)
+ jne .LBB252_20
+# BB#19:
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 112(%esp) # 4-byte Spill
+.LBB252_20:
+ movl 112(%esp), %ecx # 4-byte Reload
+ movl %ecx, 100(%ebx)
+ jne .LBB252_22
+# BB#21:
+ movl 36(%esp), %edi # 4-byte Reload
+.LBB252_22:
+ movl %edi, 104(%ebx)
+ movl 100(%esp), %ecx # 4-byte Reload
+ jne .LBB252_24
+# BB#23:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB252_24:
+ movl %ecx, 108(%ebx)
+ movl 72(%esp), %ecx # 4-byte Reload
+ jne .LBB252_26
+# BB#25:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB252_26:
+ movl %eax, 112(%ebx)
+ movl 68(%esp), %eax # 4-byte Reload
+ jne .LBB252_28
+# BB#27:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB252_28:
+ movl %eax, 116(%ebx)
+ jne .LBB252_30
+# BB#29:
+ movl 52(%esp), %ecx # 4-byte Reload
+.LBB252_30:
+ movl %ecx, 120(%ebx)
+ jne .LBB252_32
+# BB#31:
+ movl 56(%esp), %ebp # 4-byte Reload
+.LBB252_32:
+ movl %ebp, 124(%ebx)
+ addl $116, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end252:
+ .size mcl_fpDbl_add16L, .Lfunc_end252-mcl_fpDbl_add16L
+
+# ---------------------------------------------------------------------------
+# mcl_fpDbl_sub16L(uint32_t *z, const uint32_t *x, const uint32_t *y,
+#                  const uint32_t *p)                       (i386 cdecl)
+# Double-width subtraction of two 32-limb (1024-bit) values: z = x - y.
+# If the subtraction borrows out of the top limb, the 16 limbs at p are
+# added back into the high half z[16..31]; otherwise zero is added
+# (the select keeps the same instruction shape either way).
+# NOTE(review): p is presumably the field modulus -- confirm with callers.
+# After the prologue (4 pushes + subl $108) the incoming args sit at:
+#   128(%esp) = z,  132(%esp) = x,  136(%esp) = y,  140(%esp) = p
+# ---------------------------------------------------------------------------
+ .globl mcl_fpDbl_sub16L
+ .align 16, 0x90
+ .type mcl_fpDbl_sub16L,@function
+mcl_fpDbl_sub16L: # @mcl_fpDbl_sub16L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $108, %esp
+# x[0..31] - y[0..31] with a rippling borrow (subl once, then sbbl all the
+# way down).  The low 16 difference limbs are stored straight into z; the
+# high 16 are spilled to the local frame for the conditional add-back below.
+ movl 132(%esp), %eax
+ movl (%eax), %esi
+ movl 4(%eax), %edi
+ movl 136(%esp), %edx
+ subl (%edx), %esi
+ sbbl 4(%edx), %edi
+ movl 8(%eax), %ebx
+ sbbl 8(%edx), %ebx
+ movl 128(%esp), %ecx
+ movl %esi, (%ecx)
+ movl 12(%eax), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ecx)
+ movl 16(%eax), %edi
+ sbbl 16(%edx), %edi
+ movl %ebx, 8(%ecx)
+ movl 20(%edx), %ebx
+ movl %esi, 12(%ecx)
+ movl 20(%eax), %esi
+ sbbl %ebx, %esi
+ movl 24(%edx), %ebx
+ movl %edi, 16(%ecx)
+ movl 24(%eax), %edi
+ sbbl %ebx, %edi
+ movl 28(%edx), %ebx
+ movl %esi, 20(%ecx)
+ movl 28(%eax), %esi
+ sbbl %ebx, %esi
+ movl 32(%edx), %ebx
+ movl %edi, 24(%ecx)
+ movl 32(%eax), %edi
+ sbbl %ebx, %edi
+ movl 36(%edx), %ebx
+ movl %esi, 28(%ecx)
+ movl 36(%eax), %esi
+ sbbl %ebx, %esi
+ movl 40(%edx), %ebx
+ movl %edi, 32(%ecx)
+ movl 40(%eax), %edi
+ sbbl %ebx, %edi
+ movl 44(%edx), %ebx
+ movl %esi, 36(%ecx)
+ movl 44(%eax), %esi
+ sbbl %ebx, %esi
+ movl 48(%edx), %ebx
+ movl %edi, 40(%ecx)
+ movl 48(%eax), %edi
+ sbbl %ebx, %edi
+ movl 52(%edx), %ebx
+ movl %esi, 44(%ecx)
+ movl 52(%eax), %esi
+ sbbl %ebx, %esi
+ movl 56(%edx), %ebx
+ movl %edi, 48(%ecx)
+ movl 56(%eax), %edi
+ sbbl %ebx, %edi
+ movl 60(%edx), %ebx
+ movl %esi, 52(%ecx)
+ movl 60(%eax), %esi
+ sbbl %ebx, %esi
+ movl 64(%edx), %ebx
+ movl %edi, 56(%ecx)
+ movl 64(%eax), %edi
+ sbbl %ebx, %edi
+# From limb 16 upward the differences are spilled instead of stored.
+ movl %edi, 48(%esp) # 4-byte Spill
+ movl 68(%edx), %edi
+ movl %esi, 60(%ecx)
+ movl 68(%eax), %esi
+ sbbl %edi, %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 72(%edx), %esi
+ movl 72(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 44(%esp) # 4-byte Spill
+ movl 76(%edx), %esi
+ movl 76(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 80(%edx), %esi
+ movl 80(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 84(%edx), %esi
+ movl 84(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 88(%edx), %esi
+ movl 88(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 92(%edx), %esi
+ movl 92(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 96(%edx), %esi
+ movl 96(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 100(%edx), %esi
+ movl 100(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 104(%edx), %esi
+ movl 104(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 108(%edx), %esi
+ movl 108(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 112(%edx), %esi
+ movl 112(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 116(%edx), %esi
+ movl 116(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 120(%edx), %esi
+ movl 120(%eax), %edi
+ sbbl %esi, %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 124(%edx), %edx
+ movl 124(%eax), %eax
+ sbbl %edx, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+# Materialize the final borrow as 0/1 in %eax.
+ movl $0, %eax
+ sbbl $0, %eax
+ andl $1, %eax
+# %ebx = p.  Each of the following blocks selects p[i] (borrow) or 0
+# (no borrow).  The ZF set by andl/testb survives the mov-only blocks
+# in between, so the same jne condition is reused throughout.
+ movl 140(%esp), %ebx
+ jne .LBB253_1
+# BB#2:
+ movl $0, 68(%esp) # 4-byte Folded Spill
+ jmp .LBB253_3
+.LBB253_1:
+ movl 60(%ebx), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+.LBB253_3:
+ testb %al, %al
+ jne .LBB253_4
+# BB#5:
+ movl $0, 24(%esp) # 4-byte Folded Spill
+ movl $0, %ebp
+ jmp .LBB253_6
+.LBB253_4:
+ movl (%ebx), %ebp
+ movl 4(%ebx), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+.LBB253_6:
+ jne .LBB253_7
+# BB#8:
+ movl $0, 36(%esp) # 4-byte Folded Spill
+ jmp .LBB253_9
+.LBB253_7:
+ movl 56(%ebx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+.LBB253_9:
+ jne .LBB253_10
+# BB#11:
+ movl $0, 32(%esp) # 4-byte Folded Spill
+ jmp .LBB253_12
+.LBB253_10:
+ movl 52(%ebx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+.LBB253_12:
+ jne .LBB253_13
+# BB#14:
+ movl $0, 28(%esp) # 4-byte Folded Spill
+ jmp .LBB253_15
+.LBB253_13:
+ movl 48(%ebx), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+.LBB253_15:
+ jne .LBB253_16
+# BB#17:
+ movl $0, 20(%esp) # 4-byte Folded Spill
+ jmp .LBB253_18
+.LBB253_16:
+ movl 44(%ebx), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+.LBB253_18:
+ jne .LBB253_19
+# BB#20:
+ movl $0, 16(%esp) # 4-byte Folded Spill
+ jmp .LBB253_21
+.LBB253_19:
+ movl 40(%ebx), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+.LBB253_21:
+ jne .LBB253_22
+# BB#23:
+ movl $0, 12(%esp) # 4-byte Folded Spill
+ jmp .LBB253_24
+.LBB253_22:
+ movl 36(%ebx), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+.LBB253_24:
+ jne .LBB253_25
+# BB#26:
+ movl $0, 8(%esp) # 4-byte Folded Spill
+ jmp .LBB253_27
+.LBB253_25:
+ movl 32(%ebx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+.LBB253_27:
+ jne .LBB253_28
+# BB#29:
+ movl $0, 4(%esp) # 4-byte Folded Spill
+ jmp .LBB253_30
+.LBB253_28:
+ movl 28(%ebx), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+.LBB253_30:
+ jne .LBB253_31
+# BB#32:
+ movl $0, (%esp) # 4-byte Folded Spill
+ jmp .LBB253_33
+.LBB253_31:
+ movl 24(%ebx), %eax
+ movl %eax, (%esp) # 4-byte Spill
+.LBB253_33:
+ jne .LBB253_34
+# BB#35:
+ movl $0, %esi
+ jmp .LBB253_36
+.LBB253_34:
+ movl 20(%ebx), %esi
+.LBB253_36:
+ jne .LBB253_37
+# BB#38:
+ movl $0, %edx
+ jmp .LBB253_39
+.LBB253_37:
+ movl 16(%ebx), %edx
+.LBB253_39:
+ jne .LBB253_40
+# BB#41:
+ movl $0, %edi
+ jmp .LBB253_42
+.LBB253_40:
+ movl 12(%ebx), %edi
+.LBB253_42:
+ jne .LBB253_43
+# BB#44:
+ xorl %ebx, %ebx
+ jmp .LBB253_45
+.LBB253_43:
+ movl 8(%ebx), %ebx
+.LBB253_45:
+# Add the selected limbs (p or 0) into the spilled high-half differences,
+# rippling the carry, and store the results to z[16..31].
+ addl 48(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, %eax
+ movl 24(%esp), %ebp # 4-byte Reload
+ adcl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ adcl 44(%esp), %ebx # 4-byte Folded Reload
+ movl %ebp, 68(%ecx)
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %ebx, 72(%ecx)
+ adcl 56(%esp), %edx # 4-byte Folded Reload
+ movl %edi, 76(%ecx)
+ adcl 60(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 80(%ecx)
+ movl (%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %esi, 84(%ecx)
+ movl 4(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 88(%ecx)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 92(%ecx)
+ movl 12(%esp), %edx # 4-byte Reload
+ adcl 80(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 96(%ecx)
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 100(%ecx)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 104(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 108(%ecx)
+ movl 32(%esp), %edx # 4-byte Reload
+ adcl 96(%esp), %edx # 4-byte Folded Reload
+ movl %eax, 112(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %edx, 116(%ecx)
+ movl %eax, 120(%ecx)
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 124(%ecx)
+ addl $108, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end253:
+ .size mcl_fpDbl_sub16L, .Lfunc_end253-mcl_fpDbl_sub16L
+
+# ---------------------------------------------------------------------------
+# .LmulPv544x32 -- file-local helper with a custom calling convention:
+#   %ecx = destination (18 limbs written), %edx = 17-limb (544-bit) source,
+#   first stack arg (140(%esp) after the prologue) = 32-bit multiplier b.
+# Computes dst[0..17] = src[0..16] * b: a 544-bit x 32-bit product whose
+# 576-bit result includes the final carry limb.  Returns dst in %eax.
+# ---------------------------------------------------------------------------
+ .align 16, 0x90
+ .type .LmulPv544x32,@function
+.LmulPv544x32: # @mulPv544x32
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $120, %esp
+ movl %edx, %ebp
+# Form all 17 partial products b * src[i] from the top limb down; each
+# mull leaves {hi,lo} in %edx:%eax, which are spilled pairwise.
+ movl 140(%esp), %ebx
+ movl %ebx, %eax
+ mull 64(%ebp)
+ movl %edx, 116(%esp) # 4-byte Spill
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 60(%ebp)
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 56(%ebp)
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 52(%ebp)
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 48(%ebp)
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 44(%ebp)
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 40(%ebp)
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 36(%ebp)
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 32(%ebp)
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 28(%ebp)
+ movl %edx, 44(%esp) # 4-byte Spill
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 24(%ebp)
+ movl %edx, 36(%esp) # 4-byte Spill
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 20(%ebp)
+ movl %edx, 28(%esp) # 4-byte Spill
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 16(%ebp)
+ movl %edx, 20(%esp) # 4-byte Spill
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 12(%ebp)
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 8(%ebp)
+ movl %edx, %edi
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull 4(%ebp)
+ movl %edx, %esi
+ movl %eax, (%esp) # 4-byte Spill
+ movl %ebx, %eax
+ mull (%ebp)
+# Combine: dst[0] = lo(0); dst[i] = hi(i-1) + lo(i) with rippling carry;
+# dst[17] = hi(16) + final carry.
+ movl %eax, (%ecx)
+ addl (%esp), %edx # 4-byte Folded Reload
+ movl %edx, 4(%ecx)
+ adcl 4(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 8(%ecx)
+ adcl 8(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 12(%ecx)
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 16(%ecx)
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 20(%ecx)
+ movl 28(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 24(%ecx)
+ movl 36(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 28(%ecx)
+ movl 44(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 32(%ecx)
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 36(%ecx)
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 40(%ecx)
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 44(%ecx)
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 48(%ecx)
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%ecx)
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%ecx)
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%ecx)
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%ecx)
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, 68(%ecx)
+# Return the destination pointer in %eax.
+ movl %ecx, %eax
+ addl $120, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end254:
+ .size .LmulPv544x32, .Lfunc_end254-.LmulPv544x32
+
+# ---------------------------------------------------------------------------
+# mcl_fp_mulUnitPre17L(uint32_t *z, const uint32_t *x, uint32_t y)  (cdecl)
+# z[0..17] = x[0..16] * y, computed via the .LmulPv544x32 helper.  The
+# 18-limb product is built in a stack temporary at 64(%esp) and copied out.
+# After the prologue (4 pushes + subl $140) the args sit at:
+#   160(%esp) = z,  164(%esp) = x,  168(%esp) = y
+# The call/pop pair loads %ebx with the GOT base for position-independent
+# code, as the helper's ABI expects.
+# ---------------------------------------------------------------------------
+ .globl mcl_fp_mulUnitPre17L
+ .align 16, 0x90
+ .type mcl_fp_mulUnitPre17L,@function
+mcl_fp_mulUnitPre17L: # @mcl_fp_mulUnitPre17L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $140, %esp
+ calll .L255$pb
+.L255$pb:
+ popl %ebx
+.Ltmp56:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp56-.L255$pb), %ebx
+# Helper args: multiplier y on the stack, %ecx = temp dst, %edx = x.
+ movl 168(%esp), %eax
+ movl %eax, (%esp)
+ leal 64(%esp), %ecx
+ movl 164(%esp), %edx
+ calll .LmulPv544x32
+# Copy the 18 result limbs from the stack temporary to z (upper limbs
+# staged through spill slots, lower limbs through registers).
+ movl 132(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 128(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 124(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 120(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 116(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 112(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 108(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 104(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 100(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 96(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 92(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 88(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 84(%esp), %ebp
+ movl 80(%esp), %ebx
+ movl 76(%esp), %edi
+ movl 72(%esp), %esi
+ movl 64(%esp), %edx
+ movl 68(%esp), %ecx
+ movl 160(%esp), %eax
+ movl %edx, (%eax)
+ movl %ecx, 4(%eax)
+ movl %esi, 8(%eax)
+ movl %edi, 12(%eax)
+ movl %ebx, 16(%eax)
+ movl %ebp, 20(%eax)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 24(%eax)
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 28(%eax)
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 32(%eax)
+ movl 28(%esp), %ecx # 4-byte Reload
+ movl %ecx, 36(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ movl %ecx, 40(%eax)
+ movl 36(%esp), %ecx # 4-byte Reload
+ movl %ecx, 44(%eax)
+ movl 40(%esp), %ecx # 4-byte Reload
+ movl %ecx, 48(%eax)
+ movl 44(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%eax)
+ movl 48(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%eax)
+ movl 52(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ movl 56(%esp), %ecx # 4-byte Reload
+ movl %ecx, 64(%eax)
+ movl 60(%esp), %ecx # 4-byte Reload
+ movl %ecx, 68(%eax)
+ addl $140, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end255:
+ .size mcl_fp_mulUnitPre17L, .Lfunc_end255-mcl_fp_mulUnitPre17L
+
+ .globl mcl_fpDbl_mulPre17L
+ .align 16, 0x90
+ .type mcl_fpDbl_mulPre17L,@function
+mcl_fpDbl_mulPre17L: # @mcl_fpDbl_mulPre17L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1356, %esp # imm = 0x54C
+ calll .L256$pb
+.L256$pb:
+ popl %edi
+.Ltmp57:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp57-.L256$pb), %edi
+ movl %edi, 124(%esp) # 4-byte Spill
+ movl 1384(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 1280(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl %edx, %esi
+ movl %edi, %ebx
+ calll .LmulPv544x32
+ movl 1348(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 1344(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1340(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1336(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1332(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1328(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1320(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1316(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1312(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1308(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1304(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1300(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1296(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 1292(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 1288(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 1280(%esp), %eax
+ movl 1284(%esp), %ebp
+ movl 1376(%esp), %ecx
+ movl %eax, (%ecx)
+ movl 1384(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl %esi, %edx
+ movl %edi, %ebx
+ calll .LmulPv544x32
+ addl 1208(%esp), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 1276(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1272(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1268(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1260(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1256(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 1252(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1248(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1244(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1236(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1232(%esp), %edi
+ movl 1228(%esp), %esi
+ movl 1224(%esp), %edx
+ movl 1220(%esp), %ecx
+ movl 1212(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1216(%esp), %eax
+ movl 1376(%esp), %ebp
+ movl 8(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ movl 12(%esp), %ebp # 4-byte Reload
+ adcl %ebp, 120(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 64(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 1136(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 1136(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1204(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1200(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1196(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1192(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1188(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1184(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1180(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 1176(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1168(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1164(%esp), %ebx
+ movl 1160(%esp), %edi
+ movl 1156(%esp), %esi
+ movl 1152(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1148(%esp), %edx
+ movl 1140(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1144(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 1064(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 1064(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 1132(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1128(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1124(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1120(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1116(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1112(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1108(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1100(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1096(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1092(%esp), %ebx
+ movl 1088(%esp), %edi
+ movl 1084(%esp), %esi
+ movl 1080(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1076(%esp), %edx
+ movl 1068(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1072(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 992(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1060(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1056(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1052(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1044(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1040(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1036(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1032(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1028(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1024(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1020(%esp), %ebx
+ movl 1016(%esp), %edi
+ movl 1012(%esp), %esi
+ movl 1008(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1004(%esp), %edx
+ movl 996(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1000(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 920(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 920(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 988(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 984(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 980(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 976(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 972(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 968(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 964(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 960(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 956(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 952(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 948(%esp), %ebx
+ movl 944(%esp), %edi
+ movl 940(%esp), %esi
+ movl 936(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 932(%esp), %edx
+ movl 924(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 928(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 848(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 848(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 916(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 912(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 908(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 904(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 900(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 896(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 892(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 888(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 884(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 880(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 876(%esp), %ebx
+ movl 872(%esp), %edi
+ movl 868(%esp), %esi
+ movl 864(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 860(%esp), %edx
+ movl 852(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 856(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 776(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 776(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 844(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 840(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 836(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 828(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 804(%esp), %ebx
+ movl 800(%esp), %edi
+ movl 796(%esp), %esi
+ movl 792(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 788(%esp), %edx
+ movl 780(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 784(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 704(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 772(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 764(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 760(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 748(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 744(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 740(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 736(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 732(%esp), %ebx
+ movl 728(%esp), %edi
+ movl 724(%esp), %esi
+ movl 720(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 716(%esp), %edx
+ movl 708(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 712(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 632(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 700(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 696(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 692(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 688(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 684(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 680(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 676(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 672(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 660(%esp), %ebx
+ movl 656(%esp), %edi
+ movl 652(%esp), %esi
+ movl 648(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 644(%esp), %edx
+ movl 636(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 640(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 560(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 628(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 624(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 620(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 616(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 588(%esp), %ebx
+ movl 584(%esp), %edi
+ movl 580(%esp), %esi
+ movl 576(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 572(%esp), %edx
+ movl 564(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 568(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 488(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 488(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 532(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 528(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 524(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 520(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 516(%esp), %ebx
+ movl 512(%esp), %edi
+ movl 508(%esp), %esi
+ movl 504(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 500(%esp), %edx
+ movl 492(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 496(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 44(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 416(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 472(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 468(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 464(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 460(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 456(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 452(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 448(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 444(%esp), %ebx
+ movl 440(%esp), %edi
+ movl 436(%esp), %esi
+ movl 432(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 428(%esp), %edx
+ movl 420(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 424(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 48(%eax)
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 80(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 344(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 396(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 392(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 388(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 372(%esp), %ebx
+ movl 368(%esp), %edi
+ movl 364(%esp), %esi
+ movl 360(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 356(%esp), %edx
+ movl 348(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 352(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 52(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 1380(%esp), %eax
+ movl %eax, %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 272(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 340(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 336(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 320(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 316(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 304(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 300(%esp), %ebx
+ movl 296(%esp), %edi
+ movl 292(%esp), %edx
+ movl 288(%esp), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 284(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl 280(%esp), %ecx
+ movl 120(%esp), %esi # 4-byte Reload
+ movl 1376(%esp), %ebp
+ movl %esi, 56(%ebp)
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ebp
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %ecx
+ movl %ecx, %eax
+ movl 60(%eax), %eax
+ movl %eax, (%esp)
+ leal 200(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 200(%esp), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 248(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 244(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 240(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 236(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 232(%esp), %edi
+ movl 228(%esp), %esi
+ movl 224(%esp), %edx
+ movl 220(%esp), %ecx
+ movl 216(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl 204(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 208(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 1376(%esp), %ebx
+ movl %ebp, 60(%ebx)
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ movl 1384(%esp), %eax
+ movl 64(%eax), %eax
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 1380(%esp), %edx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 128(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 132(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 196(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 192(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 188(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 184(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 164(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 156(%esp), %ebx
+ movl 152(%esp), %edi
+ movl 148(%esp), %esi
+ movl 144(%esp), %edx
+ movl 140(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 64(%eax)
+ movl 64(%esp), %ebp # 4-byte Reload
+ movl %ebp, 68(%eax)
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ movl %ebp, 72(%eax)
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 76(%eax)
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 80(%eax)
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 84(%eax)
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, 88(%eax)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, 92(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 96(%eax)
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 100(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 96(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 104(%eax)
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 108(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 116(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 112(%eax)
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 116(%eax)
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 120(%eax)
+ movl %ecx, 124(%eax)
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 128(%eax)
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 132(%eax)
+ addl $1356, %esp # imm = 0x54C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end256:
+ .size mcl_fpDbl_mulPre17L, .Lfunc_end256-mcl_fpDbl_mulPre17L
+
+ .globl mcl_fpDbl_sqrPre17L
+ .align 16, 0x90
+ .type mcl_fpDbl_sqrPre17L,@function
+mcl_fpDbl_sqrPre17L: # @mcl_fpDbl_sqrPre17L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1356, %esp # imm = 0x54C
+ calll .L257$pb
+.L257$pb:
+ popl %ebx
+.Ltmp58:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp58-.L257$pb), %ebx
+ movl %ebx, 124(%esp) # 4-byte Spill
+ movl 1380(%esp), %edx
+ movl (%edx), %eax
+ movl %eax, (%esp)
+ leal 1280(%esp), %ecx
+ movl %edx, %edi
+ movl %ebx, %esi
+ calll .LmulPv544x32
+ movl 1348(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 1344(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1340(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1336(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1332(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1328(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1320(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1316(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1312(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1308(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1304(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1300(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 1296(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 1292(%esp), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 1288(%esp), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 1280(%esp), %eax
+ movl 1284(%esp), %ebp
+ movl 1376(%esp), %ecx
+ movl %eax, (%ecx)
+ movl %edi, %edx
+ movl 4(%edx), %eax
+ movl %eax, (%esp)
+ leal 1208(%esp), %ecx
+ movl %esi, %ebx
+ calll .LmulPv544x32
+ addl 1208(%esp), %ebp
+ movl %ebp, 8(%esp) # 4-byte Spill
+ movl 1276(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1272(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1268(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1260(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1256(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 1252(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1248(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1244(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1236(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1232(%esp), %edi
+ movl 1228(%esp), %esi
+ movl 1224(%esp), %edx
+ movl 1220(%esp), %ecx
+ movl 1212(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1216(%esp), %eax
+ movl 1376(%esp), %ebp
+ movl 8(%esp), %ebx # 4-byte Reload
+ movl %ebx, 4(%ebp)
+ movl 12(%esp), %ebp # 4-byte Reload
+ adcl %ebp, 120(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 8(%esp) # 4-byte Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 12(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 32(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 20(%esp) # 4-byte Spill
+ adcl 40(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 24(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 52(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 64(%esp) # 4-byte Folded Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, 56(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 8(%edx), %eax
+ movl %eax, (%esp)
+ leal 1136(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 1136(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1204(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1200(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1196(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1192(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1188(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1184(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1180(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 1176(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1172(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1168(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1164(%esp), %ebx
+ movl 1160(%esp), %edi
+ movl 1156(%esp), %esi
+ movl 1152(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1148(%esp), %edx
+ movl 1140(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1144(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 8(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 80(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 72(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 28(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 56(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 12(%edx), %eax
+ movl %eax, (%esp)
+ leal 1064(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 1064(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 1132(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 1128(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1124(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 1120(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 1116(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 1112(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 1108(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 1104(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 1100(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 1096(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 1092(%esp), %ebx
+ movl 1088(%esp), %edi
+ movl 1084(%esp), %esi
+ movl 1080(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 1076(%esp), %edx
+ movl 1068(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1072(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 12(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 16(%edx), %eax
+ movl %eax, (%esp)
+ leal 992(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 992(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 1060(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 1056(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 1052(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 1048(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 1044(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 1040(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 1036(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 1032(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 1028(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 1024(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 1020(%esp), %ebx
+ movl 1016(%esp), %edi
+ movl 1012(%esp), %esi
+ movl 1008(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 1004(%esp), %edx
+ movl 996(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 1000(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 16(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 20(%edx), %eax
+ movl %eax, (%esp)
+ leal 920(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 920(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 988(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 984(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 980(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 976(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 972(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 968(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 964(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 960(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 956(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 952(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 948(%esp), %ebx
+ movl 944(%esp), %edi
+ movl 940(%esp), %esi
+ movl 936(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 932(%esp), %edx
+ movl 924(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 928(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 20(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 24(%edx), %eax
+ movl %eax, (%esp)
+ leal 848(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 848(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 916(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 912(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 908(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 904(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 900(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 896(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 892(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 888(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 884(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 880(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 876(%esp), %ebx
+ movl 872(%esp), %edi
+ movl 868(%esp), %esi
+ movl 864(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 860(%esp), %edx
+ movl 852(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 856(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 24(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 28(%edx), %eax
+ movl %eax, (%esp)
+ leal 776(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 776(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 844(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 840(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 836(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 832(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 828(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 824(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 820(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 816(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 812(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 808(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 804(%esp), %ebx
+ movl 800(%esp), %edi
+ movl 796(%esp), %esi
+ movl 792(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 788(%esp), %edx
+ movl 780(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 784(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 28(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 32(%edx), %eax
+ movl %eax, (%esp)
+ leal 704(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 704(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 772(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 768(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 764(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 760(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 756(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 752(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 748(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 744(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 740(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 736(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 732(%esp), %ebx
+ movl 728(%esp), %edi
+ movl 724(%esp), %esi
+ movl 720(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 716(%esp), %edx
+ movl 708(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 712(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 32(%eax)
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 32(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 24(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 40(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 36(%edx), %eax
+ movl %eax, (%esp)
+ leal 632(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 632(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 700(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 696(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 692(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 688(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 684(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 680(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 676(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 672(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 668(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 664(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 660(%esp), %ebx
+ movl 656(%esp), %edi
+ movl 652(%esp), %esi
+ movl 648(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 644(%esp), %edx
+ movl 636(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 640(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 52(%esp), %ebp # 4-byte Reload
+ movl %ebp, 36(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 32(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 24(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 40(%edx), %eax
+ movl %eax, (%esp)
+ leal 560(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 560(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 628(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 624(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 620(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 616(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 612(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 608(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 604(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 600(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 596(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 592(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 588(%esp), %ebx
+ movl 584(%esp), %edi
+ movl 580(%esp), %esi
+ movl 576(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 572(%esp), %edx
+ movl 564(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 568(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 40(%eax)
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 12(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 16(%esp) # 4-byte Spill
+ adcl 52(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 44(%edx), %eax
+ movl %eax, (%esp)
+ leal 488(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 488(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 556(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 552(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 548(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 544(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 540(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 536(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 532(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 528(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 524(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 520(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 516(%esp), %ebx
+ movl 512(%esp), %edi
+ movl 508(%esp), %esi
+ movl 504(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 500(%esp), %edx
+ movl 492(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 496(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 44(%eax)
+ movl 8(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 12(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 48(%edx), %eax
+ movl %eax, (%esp)
+ leal 416(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 416(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 484(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 480(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 476(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 472(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 468(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 464(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 460(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 456(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 452(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 448(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 444(%esp), %ebx
+ movl 440(%esp), %edi
+ movl 436(%esp), %esi
+ movl 432(%esp), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 428(%esp), %edx
+ movl 420(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 424(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 48(%eax)
+ movl 48(%esp), %ebp # 4-byte Reload
+ adcl 4(%esp), %ebp # 4-byte Folded Reload
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 44(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 52(%edx), %eax
+ movl %eax, (%esp)
+ leal 344(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 344(%esp), %ebp
+ movl %ebp, 48(%esp) # 4-byte Spill
+ movl 412(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 408(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 404(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 400(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 396(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 392(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 388(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 384(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 380(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 376(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 372(%esp), %ebx
+ movl 368(%esp), %edi
+ movl 364(%esp), %esi
+ movl 360(%esp), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 356(%esp), %edx
+ movl 348(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 352(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 48(%esp), %ebp # 4-byte Reload
+ movl %ebp, 52(%eax)
+ movl 4(%esp), %eax # 4-byte Reload
+ adcl %eax, 120(%esp) # 4-byte Folded Spill
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 4(%esp) # 4-byte Spill
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 8(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 28(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 12(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 16(%esp) # 4-byte Spill
+ adcl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 20(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 76(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 40(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 108(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 32(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, 36(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 56(%edx), %eax
+ movl %eax, (%esp)
+ leal 272(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 272(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 340(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 336(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 332(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 328(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 324(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 320(%esp), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 316(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 312(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 308(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 304(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 300(%esp), %ebx
+ movl 296(%esp), %edi
+ movl 292(%esp), %edx
+ movl 288(%esp), %esi
+ movl %esi, 24(%esp) # 4-byte Spill
+ movl 284(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 276(%esp), %eax
+ movl 280(%esp), %ecx
+ movl 120(%esp), %esi # 4-byte Reload
+ movl 1376(%esp), %ebp
+ movl %esi, 56(%ebp)
+ adcl 4(%esp), %eax # 4-byte Folded Reload
+ movl %eax, %ebp
+ adcl 8(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ adcl %eax, 24(%esp) # 4-byte Folded Spill
+ adcl 16(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 16(%esp) # 4-byte Spill
+ adcl 20(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 20(%esp) # 4-byte Spill
+ adcl 52(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 28(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 68(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 72(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 112(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 44(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl %eax, 48(%esp) # 4-byte Folded Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 88(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 104(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 36(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, 32(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 60(%edx), %eax
+ movl %eax, (%esp)
+ leal 200(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 200(%esp), %ebp
+ movl %ebp, 12(%esp) # 4-byte Spill
+ movl 268(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 264(%esp), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 260(%esp), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 256(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 252(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 248(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 244(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 240(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 236(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 232(%esp), %edi
+ movl 228(%esp), %esi
+ movl 224(%esp), %edx
+ movl 220(%esp), %ecx
+ movl 216(%esp), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 212(%esp), %eax
+ movl 204(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 208(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 12(%esp), %ebp # 4-byte Reload
+ movl 1376(%esp), %ebx
+ movl %ebp, 60(%ebx)
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 8(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 76(%esp), %ebp # 4-byte Folded Reload
+ adcl 24(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ adcl %eax, 36(%esp) # 4-byte Folded Spill
+ adcl 20(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 24(%esp) # 4-byte Spill
+ adcl 28(%esp), %edx # 4-byte Folded Reload
+ movl %edx, 28(%esp) # 4-byte Spill
+ adcl 56(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 56(%esp) # 4-byte Spill
+ adcl 60(%esp), %edi # 4-byte Folded Reload
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 64(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 116(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 84(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 92(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 100(%esp), %eax # 4-byte Folded Reload
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl %eax, 40(%esp) # 4-byte Folded Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ adcl %eax, 44(%esp) # 4-byte Folded Spill
+ adcl $0, 52(%esp) # 4-byte Folded Spill
+ movl 1380(%esp), %edx
+ movl 64(%edx), %eax
+ movl %eax, (%esp)
+ leal 128(%esp), %ecx
+ movl 124(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 120(%esp), %eax # 4-byte Reload
+ addl 128(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 132(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 136(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 196(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 192(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 188(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 184(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 180(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 176(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 172(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 168(%esp), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 164(%esp), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 160(%esp), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 156(%esp), %ebx
+ movl 152(%esp), %edi
+ movl 148(%esp), %esi
+ movl 144(%esp), %edx
+ movl 140(%esp), %ecx
+ movl 1376(%esp), %eax
+ movl 120(%esp), %ebp # 4-byte Reload
+ movl %ebp, 64(%eax)
+ movl 68(%esp), %ebp # 4-byte Reload
+ movl %ebp, 68(%eax)
+ adcl 36(%esp), %ecx # 4-byte Folded Reload
+ movl 76(%esp), %ebp # 4-byte Reload
+ movl %ebp, 72(%eax)
+ adcl 24(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 76(%eax)
+ adcl 28(%esp), %esi # 4-byte Folded Reload
+ movl %edx, 80(%eax)
+ adcl 56(%esp), %edi # 4-byte Folded Reload
+ movl %esi, 84(%eax)
+ adcl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %edi, 88(%eax)
+ movl 20(%esp), %edx # 4-byte Reload
+ adcl 72(%esp), %edx # 4-byte Folded Reload
+ movl %ebx, 92(%eax)
+ movl 32(%esp), %ecx # 4-byte Reload
+ adcl 80(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 96(%eax)
+ movl 48(%esp), %edx # 4-byte Reload
+ adcl 88(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 100(%eax)
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 96(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 104(%eax)
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 104(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 108(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 112(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 112(%eax)
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 40(%esp), %edx # 4-byte Folded Reload
+ movl %ecx, 116(%eax)
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 44(%esp), %ecx # 4-byte Folded Reload
+ movl %edx, 120(%eax)
+ movl %ecx, 124(%eax)
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 52(%esp), %ecx # 4-byte Folded Reload
+ movl %ecx, 128(%eax)
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl $0, %ecx
+ movl %ecx, 132(%eax)
+ addl $1356, %esp # imm = 0x54C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end257:
+ .size mcl_fpDbl_sqrPre17L, .Lfunc_end257-mcl_fpDbl_sqrPre17L
+
+ .globl mcl_fp_mont17L
+ .align 16, 0x90
+ .type mcl_fp_mont17L,@function
+mcl_fp_mont17L: # @mcl_fp_mont17L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2588, %esp # imm = 0xA1C
+ calll .L258$pb
+.L258$pb:
+ popl %ebx
+.Ltmp59:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp59-.L258$pb), %ebx
+ movl 2620(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 2512(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 2512(%esp), %ebp
+ movl 2516(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl %ebp, %eax
+ imull %esi, %eax
+ movl 2580(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 2576(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 2572(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 2568(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 2564(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 2560(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2556(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2552(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2548(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 2544(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 2540(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2536(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 2532(%esp), %edi
+ movl 2528(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2524(%esp), %esi
+ movl 2520(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 2440(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ addl 2440(%esp), %ebp
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2444(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2448(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 2452(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2456(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2460(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2464(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2468(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2472(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2476(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2480(%esp), %eax
+ movl %eax, %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2484(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2488(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2492(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2496(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2500(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 2504(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 2508(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl 2616(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 2368(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %ebp
+ movl 120(%esp), %ecx # 4-byte Reload
+ addl 2368(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2372(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2376(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2380(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2384(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2388(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 2392(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2396(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2400(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 2404(%esp), %esi
+ movl %esi, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 2408(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2412(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2416(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2420(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2424(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 2428(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 2432(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 2436(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2296(%esp), %ecx
+ movl 2620(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ movl 116(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 2296(%esp), %ebp
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 2300(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 2304(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2308(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2312(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 2316(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 2320(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 2324(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2328(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2332(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 2336(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 2340(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 2344(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 2348(%esp), %esi
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 2352(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 2356(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 2360(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 2364(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, %edi
+ movl 2616(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 2224(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 100(%esp), %ecx # 4-byte Reload
+ addl 2224(%esp), %ecx
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2228(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2232(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2236(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2240(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2244(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2248(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2252(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2256(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2260(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2268(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 2272(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 2276(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 2280(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 2284(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 2288(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ adcl 2292(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2152(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 2152(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2156(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2160(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2164(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2168(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2172(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2176(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2180(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2184(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2188(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2192(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2196(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2200(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ebp # 4-byte Reload
+ adcl 2204(%esp), %ebp
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl 2208(%esp), %edi
+ movl 132(%esp), %esi # 4-byte Reload
+ adcl 2212(%esp), %esi
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2216(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2220(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 2080(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 112(%esp), %ecx # 4-byte Reload
+ addl 2080(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2084(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2088(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2092(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2096(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2100(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2104(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2108(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2112(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2116(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2120(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2124(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 2128(%esp), %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ adcl 2132(%esp), %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ adcl 2136(%esp), %esi
+ movl %esi, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 2140(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2144(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 2148(%esp), %esi
+ sbbl %ebp, %ebp
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2008(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl %ebp, %eax
+ andl $1, %eax
+ addl 2008(%esp), %edi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2012(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2016(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 2020(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 2024(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 2028(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2032(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2036(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 2040(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 2044(%esp), %edi
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 2048(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 2052(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 2056(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 2060(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 2064(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 2068(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 2072(%esp), %ebp
+ adcl 2076(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1936(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 1936(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1940(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1944(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1948(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1952(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 1956(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1960(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1964(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1968(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1972(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1976(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1980(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1984(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1988(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1992(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 1996(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2000(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2004(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ sbbl %ebp, %ebp
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1864(%esp), %ecx
+ movl 2620(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ andl $1, %ebp
+ movl %ebp, %ecx
+ addl 1864(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1868(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1872(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1876(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 1880(%esp), %edi
+ adcl 1884(%esp), %esi
+ movl %esi, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1888(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1892(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 1896(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1900(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1904(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1908(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1912(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ebp # 4-byte Reload
+ adcl 1916(%esp), %ebp
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1920(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1924(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1928(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1932(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1792(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 1792(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1796(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1800(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 1804(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1808(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1812(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1816(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 1820(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1824(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1828(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1832(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1836(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl 1840(%esp), %ebp
+ movl %ebp, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1844(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 1848(%esp), %edi
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 1852(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1856(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1860(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1720(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %eax
+ movl 80(%esp), %ecx # 4-byte Reload
+ addl 1720(%esp), %ecx
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1724(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1728(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1732(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1736(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1740(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1744(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1748(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ adcl 1752(%esp), %esi
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1756(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1760(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 1764(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 1768(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1772(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ adcl 1776(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ adcl 1780(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1784(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 1788(%esp), %ebp
+ adcl $0, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1648(%esp), %ecx
+ movl 2612(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ movl 92(%esp), %eax # 4-byte Reload
+ addl 1648(%esp), %eax
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 1652(%esp), %edi
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1656(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1660(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1664(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1668(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1672(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 1676(%esp), %esi
+ movl %esi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1680(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1684(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 1688(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 1692(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1696(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1700(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1704(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1708(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ adcl 1712(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1716(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %eax, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1576(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 1576(%esp), %ebp
+ adcl 1580(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1584(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 1588(%esp), %ebp
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 1592(%esp), %edi
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1596(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1600(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1604(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1608(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1612(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1616(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1620(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1624(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1628(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1632(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1636(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1640(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1644(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1504(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 76(%esp), %ecx # 4-byte Reload
+ addl 1504(%esp), %ecx
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1508(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 1512(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ adcl 1516(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 1520(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1524(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1528(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1532(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1536(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %edi # 4-byte Reload
+ adcl 1540(%esp), %edi
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1544(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1548(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1552(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1556(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1560(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1564(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1568(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1572(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1432(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl 76(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1432(%esp), %ebp
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1436(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1440(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1444(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1448(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1452(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1456(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ebp # 4-byte Reload
+ adcl 1460(%esp), %ebp
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1464(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ adcl 1468(%esp), %edi
+ movl %edi, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 1472(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %edi # 4-byte Reload
+ adcl 1476(%esp), %edi
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1480(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1484(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1488(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1492(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ adcl 1496(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1500(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 1360(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 68(%esp), %ecx # 4-byte Reload
+ addl 1360(%esp), %ecx
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1364(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1372(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1376(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1380(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1384(%esp), %ebp
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 1400(%esp), %edi
+ movl %edi, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1404(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 1408(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1412(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1288(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl 68(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1288(%esp), %edi
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1292(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1296(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1300(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1304(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1308(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 1312(%esp), %ebp
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1316(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 1320(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 1324(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1328(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1332(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl 1336(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl 1340(%esp), %edi
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1344(%esp), %esi
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1348(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1352(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1356(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 1216(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 1216(%esp), %ecx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1220(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1236(%esp), %ebp
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ebp # 4-byte Reload
+ adcl 1240(%esp), %ebp
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1244(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1248(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1260(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 1264(%esp), %edi
+ movl %edi, 112(%esp) # 4-byte Spill
+ adcl 1268(%esp), %esi
+ movl %esi, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1144(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl %edi, %eax
+ andl $1, %eax
+ addl 1144(%esp), %esi
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1148(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1152(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1156(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %edi # 4-byte Reload
+ adcl 1160(%esp), %edi
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1164(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 1168(%esp), %ebp
+ movl %ebp, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 1172(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ecx # 4-byte Reload
+ adcl 1176(%esp), %ecx
+ movl %ecx, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %esi # 4-byte Reload
+ adcl 1180(%esp), %esi
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1184(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1188(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1192(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1196(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1200(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1204(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1208(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1212(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 1072(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 72(%esp), %ecx # 4-byte Reload
+ addl 1072(%esp), %ecx
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1076(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ adcl 1080(%esp), %ebp
+ adcl 1084(%esp), %edi
+ movl %edi, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1088(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 1104(%esp), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 1112(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %edi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1000(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl 72(%esp), %eax # 4-byte Reload
+ andl $1, %eax
+ addl 1000(%esp), %edi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1004(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl 1008(%esp), %ebp
+ movl %ebp, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1012(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1016(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 1020(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %ecx # 4-byte Reload
+ adcl 1024(%esp), %ecx
+ movl %ecx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %ebp # 4-byte Reload
+ adcl 1028(%esp), %ebp
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 1032(%esp), %ecx
+ movl %ecx, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 1036(%esp), %edi
+ adcl 1040(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1044(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1048(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1052(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 1056(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1060(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1064(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1068(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 84(%esp), %ecx # 4-byte Reload
+ addl 928(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 932(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 936(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl 952(%esp), %ebp
+ movl %ebp, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 960(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 980(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 984(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 856(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 856(%esp), %ebp
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 888(%esp), %ebp
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 896(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 912(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 924(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2616(%esp), %ecx
+ movl %ecx, %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl 2612(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 784(%esp), %ecx
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 788(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 812(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 820(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %edi # 4-byte Reload
+ adcl 828(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 712(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 716(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %esi # 4-byte Reload
+ adcl 728(%esp), %esi
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %ebp # 4-byte Reload
+ adcl 736(%esp), %ebp
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 756(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 640(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 104(%esp), %ecx # 4-byte Reload
+ addl 640(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 652(%esp), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 660(%esp), %ebp
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 680(%esp), %edi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 696(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl 104(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 568(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 588(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 600(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 604(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 608(%esp), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 616(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ adcl 624(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 108(%esp), %ecx # 4-byte Reload
+ addl 496(%esp), %ecx
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 500(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 524(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 528(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 540(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %edi # 4-byte Reload
+ adcl 544(%esp), %edi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ sbbl %eax, %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ movl 108(%esp), %ecx # 4-byte Reload
+ andl $1, %ecx
+ addl 424(%esp), %esi
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 432(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %esi # 4-byte Reload
+ adcl 440(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 456(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 472(%esp), %edi
+ movl %edi, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ebp # 4-byte Reload
+ adcl 480(%esp), %ebp
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 60(%eax), %eax
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 124(%esp), %ecx # 4-byte Reload
+ addl 352(%esp), %ecx
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 356(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl 364(%esp), %esi
+ movl %esi, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %edi # 4-byte Reload
+ adcl 372(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 404(%esp), %ebp
+ movl %ebp, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ sbbl %esi, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 60(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 2620(%esp), %edx
+ calll .LmulPv544x32
+ andl $1, %esi
+ movl %esi, %ecx
+ addl 280(%esp), %ebp
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %esi # 4-byte Reload
+ adcl 288(%esp), %esi
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 116(%esp), %ebp # 4-byte Reload
+ adcl 296(%esp), %ebp
+ adcl 300(%esp), %edi
+ movl %edi, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %edi # 4-byte Reload
+ adcl 308(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ movl 2616(%esp), %eax
+ movl 64(%eax), %eax
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 2612(%esp), %edx
+ calll .LmulPv544x32
+ movl 128(%esp), %ecx # 4-byte Reload
+ addl 208(%esp), %ecx
+ adcl 212(%esp), %esi
+ movl %esi, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 220(%esp), %ebp
+ movl %ebp, 116(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 224(%esp), %ebp
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 232(%esp), %edi
+ movl %edi, 88(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 264(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ sbbl %edi, %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 136(%esp), %ecx
+ movl 2620(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ andl $1, %edi
+ addl 136(%esp), %esi
+ movl 116(%esp), %edx # 4-byte Reload
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 140(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 144(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ adcl 148(%esp), %edx
+ movl %edx, 116(%esp) # 4-byte Spill
+ adcl 152(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 156(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 160(%esp), %eax
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 164(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 168(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 172(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 176(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 180(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 184(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 188(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 192(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 196(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 200(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 124(%esp), %ecx # 4-byte Reload
+ adcl 204(%esp), %ecx
+ movl %ecx, 124(%esp) # 4-byte Spill
+ adcl $0, %edi
+ movl 132(%esp), %ecx # 4-byte Reload
+ movl 2620(%esp), %ebx
+ subl (%ebx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 120(%esp), %ecx # 4-byte Reload
+ sbbl 4(%ebx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ sbbl 8(%ebx), %edx
+ movl %edx, 20(%esp) # 4-byte Spill
+ sbbl 12(%ebx), %ebp
+ movl %ebp, 24(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ movl %eax, %edx
+ sbbl 16(%ebx), %ebp
+ movl %ebp, 28(%esp) # 4-byte Spill
+ sbbl 20(%ebx), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ sbbl 24(%ebx), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ sbbl 28(%ebx), %esi
+ movl %esi, 40(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ sbbl 32(%ebx), %esi
+ movl %esi, 44(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ sbbl 36(%ebx), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ sbbl 40(%ebx), %esi
+ movl %esi, 52(%esp) # 4-byte Spill
+ movl 72(%esp), %esi # 4-byte Reload
+ sbbl 44(%ebx), %esi
+ movl %esi, 56(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ sbbl 48(%ebx), %esi
+ movl %esi, 60(%esp) # 4-byte Spill
+ movl 96(%esp), %ebp # 4-byte Reload
+ sbbl 52(%ebx), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 104(%esp), %esi # 4-byte Reload
+ sbbl 56(%ebx), %esi
+ movl %esi, 128(%esp) # 4-byte Spill
+ movl %ebx, %ebp
+ movl 108(%esp), %ebx # 4-byte Reload
+ sbbl 60(%ebp), %ebx
+ movl 124(%esp), %esi # 4-byte Reload
+ sbbl 64(%ebp), %esi
+ movl %esi, %ebp
+ sbbl $0, %edi
+ andl $1, %edi
+ jne .LBB258_2
+# BB#1:
+ movl %ebx, 108(%esp) # 4-byte Spill
+.LBB258_2:
+ movl %edi, %ebx
+ testb %bl, %bl
+ movl 132(%esp), %ebx # 4-byte Reload
+ jne .LBB258_4
+# BB#3:
+ movl 12(%esp), %ebx # 4-byte Reload
+.LBB258_4:
+ movl 2608(%esp), %eax
+ movl %ebx, (%eax)
+ movl 120(%esp), %ebx # 4-byte Reload
+ jne .LBB258_6
+# BB#5:
+ movl 16(%esp), %ebx # 4-byte Reload
+.LBB258_6:
+ movl %ebx, 4(%eax)
+ jne .LBB258_8
+# BB#7:
+ movl 20(%esp), %ecx # 4-byte Reload
+ movl %ecx, 116(%esp) # 4-byte Spill
+.LBB258_8:
+ movl 116(%esp), %ecx # 4-byte Reload
+ movl %ecx, 8(%eax)
+ jne .LBB258_10
+# BB#9:
+ movl 24(%esp), %ecx # 4-byte Reload
+ movl %ecx, 100(%esp) # 4-byte Spill
+.LBB258_10:
+ movl 100(%esp), %ecx # 4-byte Reload
+ movl %ecx, 12(%eax)
+ movl 112(%esp), %esi # 4-byte Reload
+ jne .LBB258_12
+# BB#11:
+ movl 28(%esp), %esi # 4-byte Reload
+.LBB258_12:
+ movl %esi, 16(%eax)
+ movl 80(%esp), %ecx # 4-byte Reload
+ jne .LBB258_14
+# BB#13:
+ movl 32(%esp), %edx # 4-byte Reload
+.LBB258_14:
+ movl %edx, 20(%eax)
+ jne .LBB258_16
+# BB#15:
+ movl 36(%esp), %ecx # 4-byte Reload
+.LBB258_16:
+ movl %ecx, 24(%eax)
+ movl 92(%esp), %ecx # 4-byte Reload
+ jne .LBB258_18
+# BB#17:
+ movl 40(%esp), %ecx # 4-byte Reload
+.LBB258_18:
+ movl %ecx, 28(%eax)
+ movl 76(%esp), %ecx # 4-byte Reload
+ jne .LBB258_20
+# BB#19:
+ movl 44(%esp), %ecx # 4-byte Reload
+.LBB258_20:
+ movl %ecx, 32(%eax)
+ movl 68(%esp), %ecx # 4-byte Reload
+ jne .LBB258_22
+# BB#21:
+ movl 48(%esp), %ecx # 4-byte Reload
+.LBB258_22:
+ movl %ecx, 36(%eax)
+ movl 64(%esp), %ecx # 4-byte Reload
+ jne .LBB258_24
+# BB#23:
+ movl 52(%esp), %ecx # 4-byte Reload
+.LBB258_24:
+ movl %ecx, 40(%eax)
+ movl 72(%esp), %ecx # 4-byte Reload
+ jne .LBB258_26
+# BB#25:
+ movl 56(%esp), %ecx # 4-byte Reload
+.LBB258_26:
+ movl %ecx, 44(%eax)
+ movl 84(%esp), %ecx # 4-byte Reload
+ jne .LBB258_28
+# BB#27:
+ movl 60(%esp), %ecx # 4-byte Reload
+.LBB258_28:
+ movl %ecx, 48(%eax)
+ movl 96(%esp), %ecx # 4-byte Reload
+ jne .LBB258_30
+# BB#29:
+ movl 88(%esp), %ecx # 4-byte Reload
+.LBB258_30:
+ movl %ecx, 52(%eax)
+ movl 104(%esp), %ecx # 4-byte Reload
+ jne .LBB258_32
+# BB#31:
+ movl 128(%esp), %ecx # 4-byte Reload
+.LBB258_32:
+ movl %ecx, 56(%eax)
+ movl 108(%esp), %ecx # 4-byte Reload
+ movl %ecx, 60(%eax)
+ movl 124(%esp), %ecx # 4-byte Reload
+ jne .LBB258_34
+# BB#33:
+ movl %ebp, %ecx
+.LBB258_34:
+ movl %ecx, 64(%eax)
+ addl $2588, %esp # imm = 0xA1C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end258:
+ .size mcl_fp_mont17L, .Lfunc_end258-mcl_fp_mont17L
+
+ .globl mcl_fp_montNF17L
+ .align 16, 0x90
+ .type mcl_fp_montNF17L,@function
+mcl_fp_montNF17L: # @mcl_fp_montNF17L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $2572, %esp # imm = 0xA0C
+ calll .L259$pb
+.L259$pb:
+ popl %ebx
+.Ltmp60:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp60-.L259$pb), %ebx
+ movl 2604(%esp), %eax
+ movl -4(%eax), %esi
+ movl %esi, 48(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl (%eax), %eax
+ movl %eax, (%esp)
+ leal 2496(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 2496(%esp), %edi
+ movl 2500(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %edi, %eax
+ imull %esi, %eax
+ movl 2564(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 2560(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 2556(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 2552(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 2548(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 2544(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 2540(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 2536(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 2532(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 2528(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 2524(%esp), %ebp
+ movl 2520(%esp), %esi
+ movl 2516(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 2512(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 2508(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 2504(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl %eax, (%esp)
+ leal 2424(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 2424(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2428(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2432(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2436(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2440(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2444(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 2448(%esp), %esi
+ movl %esi, 80(%esp) # 4-byte Spill
+ adcl 2452(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 2456(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 2460(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2464(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2468(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %esi # 4-byte Reload
+ adcl 2472(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2476(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 2480(%esp), %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2484(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2488(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2492(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 4(%eax), %eax
+ movl %eax, (%esp)
+ leal 2352(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 2420(%esp), %ecx
+ movl 112(%esp), %edx # 4-byte Reload
+ addl 2352(%esp), %edx
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2356(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2360(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2364(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2368(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2372(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2376(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 2380(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 2384(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2388(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2392(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 2396(%esp), %esi
+ movl %esi, %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2400(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2404(%esp), %edi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2408(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 2412(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2416(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl $0, %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2280(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 2280(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2284(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2288(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2292(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2296(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2300(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2304(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 2308(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 2312(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 2316(%esp), %esi
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2320(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 2324(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2328(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 2332(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 2336(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 2340(%esp), %ebp
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 2344(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2348(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 8(%eax), %eax
+ movl %eax, (%esp)
+ leal 2208(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 2276(%esp), %eax
+ movl 92(%esp), %edx # 4-byte Reload
+ addl 2208(%esp), %edx
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 2212(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 2216(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 2220(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2224(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 2228(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 2232(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 2236(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 2240(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2244(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2248(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2252(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 2256(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 2260(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 2264(%esp), %ebp
+ adcl 2268(%esp), %edi
+ movl %edi, %esi
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 2272(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 2136(%esp), %ecx
+ movl 2604(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ addl 2136(%esp), %edi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2140(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 2144(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2148(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2152(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2156(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 2160(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 2164(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2168(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2172(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2176(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2180(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2184(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %edi # 4-byte Reload
+ adcl 2188(%esp), %edi
+ adcl 2192(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ adcl 2196(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 2200(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 2204(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 12(%eax), %eax
+ movl %eax, (%esp)
+ leal 2064(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 2132(%esp), %eax
+ movl 104(%esp), %edx # 4-byte Reload
+ addl 2064(%esp), %edx
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 2068(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 2072(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 2076(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 2080(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 2084(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ebp # 4-byte Reload
+ adcl 2088(%esp), %ebp
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 2092(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 2096(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 2100(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 2104(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 2108(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ adcl 2112(%esp), %edi
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 2116(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 2120(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ adcl 2124(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 2128(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl %edx, %esi
+ movl %esi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1992(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1992(%esp), %esi
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1996(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 2000(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 2004(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 2008(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 2012(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 2016(%esp), %ebp
+ movl %ebp, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 2020(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 2024(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 2028(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 2032(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 2036(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 2040(%esp), %edi
+ movl %edi, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 2044(%esp), %esi
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 2048(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 2052(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %edi # 4-byte Reload
+ adcl 2056(%esp), %edi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 2060(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 16(%eax), %eax
+ movl %eax, (%esp)
+ leal 1920(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 1988(%esp), %eax
+ movl 76(%esp), %edx # 4-byte Reload
+ addl 1920(%esp), %edx
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1924(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1928(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1932(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 1936(%esp), %ebp
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1940(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1944(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1948(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1952(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1956(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1960(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1964(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ adcl 1968(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1972(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 1976(%esp), %esi
+ adcl 1980(%esp), %edi
+ movl %edi, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1984(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl %edx, %eax
+ movl %edx, %edi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1848(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1848(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1852(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1856(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1860(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1864(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1868(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1872(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1876(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1880(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1884(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1888(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1892(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1896(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1900(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 1904(%esp), %esi
+ movl %esi, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 1908(%esp), %ebp
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1912(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1916(%esp), %eax
+ movl %eax, %edi
+ movl 2600(%esp), %eax
+ movl 20(%eax), %eax
+ movl %eax, (%esp)
+ leal 1776(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 1844(%esp), %eax
+ movl 84(%esp), %edx # 4-byte Reload
+ addl 1776(%esp), %edx
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1780(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1784(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1788(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1792(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1796(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1800(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1804(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 1808(%esp), %esi
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1812(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1816(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1820(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1824(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1828(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ adcl 1832(%esp), %ebp
+ movl %ebp, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1836(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 1840(%esp), %edi
+ adcl $0, %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1704(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1704(%esp), %ebp
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1708(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1712(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1716(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1720(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1724(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1728(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1732(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl 1736(%esp), %esi
+ movl %esi, %ebp
+ movl 96(%esp), %esi # 4-byte Reload
+ adcl 1740(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1744(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1748(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1752(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1756(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1760(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1764(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1768(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1772(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 24(%eax), %eax
+ movl %eax, (%esp)
+ leal 1632(%esp), %ecx
+ movl 2596(%esp), %eax
+ movl %eax, %edx
+ calll .LmulPv544x32
+ movl 1700(%esp), %eax
+ movl 80(%esp), %edx # 4-byte Reload
+ addl 1632(%esp), %edx
+ movl 60(%esp), %ecx # 4-byte Reload
+ adcl 1636(%esp), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ecx # 4-byte Reload
+ adcl 1640(%esp), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1644(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ecx # 4-byte Reload
+ adcl 1648(%esp), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1652(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1656(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ adcl 1660(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ adcl 1664(%esp), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1668(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl 1672(%esp), %esi
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1676(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1680(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1684(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1688(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ adcl 1692(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1696(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl %edx, %edi
+ movl %edi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1560(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1560(%esp), %edi
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1564(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1568(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1572(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1576(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edi # 4-byte Reload
+ adcl 1580(%esp), %edi
+ movl 64(%esp), %ebp # 4-byte Reload
+ adcl 1584(%esp), %ebp
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1588(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1592(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1596(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 1600(%esp), %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1604(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %esi # 4-byte Reload
+ adcl 1608(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1612(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1616(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1620(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1624(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1628(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 28(%eax), %eax
+ movl %eax, (%esp)
+ leal 1488(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 1556(%esp), %eax
+ movl 60(%esp), %ecx # 4-byte Reload
+ addl 1488(%esp), %ecx
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1492(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 1496(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 1500(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl 1504(%esp), %edi
+ adcl 1508(%esp), %ebp
+ movl %ebp, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1512(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 1516(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %edx # 4-byte Reload
+ adcl 1520(%esp), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 1524(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %edx # 4-byte Reload
+ adcl 1528(%esp), %edx
+ movl %edx, 116(%esp) # 4-byte Spill
+ adcl 1532(%esp), %esi
+ movl %esi, %ebp
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 1536(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 1540(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 1544(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1548(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 1552(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1416(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1416(%esp), %esi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %esi # 4-byte Reload
+ adcl 1428(%esp), %esi
+ adcl 1432(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edi # 4-byte Reload
+ adcl 1436(%esp), %edi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1440(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1444(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1448(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1452(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1456(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ adcl 1460(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1464(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1468(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1472(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1476(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1480(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 1484(%esp), %ebp
+ movl 2600(%esp), %eax
+ movl 32(%eax), %eax
+ movl %eax, (%esp)
+ leal 1344(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 1412(%esp), %eax
+ movl 52(%esp), %edx # 4-byte Reload
+ addl 1344(%esp), %edx
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1348(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl 1352(%esp), %esi
+ movl %esi, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1356(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ adcl 1360(%esp), %edi
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1364(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1368(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1372(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1376(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1380(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1384(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1388(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ecx # 4-byte Reload
+ adcl 1392(%esp), %ecx
+ movl %ecx, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %esi # 4-byte Reload
+ adcl 1396(%esp), %esi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1400(%esp), %ecx
+ movl %ecx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1404(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 1408(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1272(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1272(%esp), %ebp
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl 1288(%esp), %edi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1292(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 1304(%esp), %ebp
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1312(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1320(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 1324(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1328(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1336(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 36(%eax), %eax
+ movl %eax, (%esp)
+ leal 1200(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 1268(%esp), %eax
+ movl 56(%esp), %ecx # 4-byte Reload
+ addl 1200(%esp), %ecx
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 1204(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 1208(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ adcl 1212(%esp), %edi
+ movl %edi, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 1216(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 1220(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %esi # 4-byte Reload
+ adcl 1224(%esp), %esi
+ adcl 1228(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 1232(%esp), %edi
+ movl 112(%esp), %edx # 4-byte Reload
+ adcl 1236(%esp), %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %edx # 4-byte Reload
+ adcl 1240(%esp), %edx
+ movl %edx, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 1244(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 1248(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 1252(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 1256(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 1260(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 1264(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1128(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 1128(%esp), %ebp
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1144(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1148(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl 1152(%esp), %esi
+ movl %esi, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 1160(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 1172(%esp), %ebp
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %esi # 4-byte Reload
+ adcl 1180(%esp), %esi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %edi # 4-byte Reload
+ adcl 1188(%esp), %edi
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 40(%eax), %eax
+ movl %eax, (%esp)
+ leal 1056(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 1124(%esp), %edx
+ movl 68(%esp), %eax # 4-byte Reload
+ addl 1056(%esp), %eax
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1060(%esp), %ecx
+ movl %ecx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %ecx # 4-byte Reload
+ adcl 1064(%esp), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %ecx # 4-byte Reload
+ adcl 1068(%esp), %ecx
+ movl %ecx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %ecx # 4-byte Reload
+ adcl 1072(%esp), %ecx
+ movl %ecx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 1076(%esp), %ecx
+ movl %ecx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %ecx # 4-byte Reload
+ adcl 1080(%esp), %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %ecx # 4-byte Reload
+ adcl 1084(%esp), %ecx
+ movl %ecx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 1088(%esp), %ecx
+ movl %ecx, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 1092(%esp), %ecx
+ movl %ecx, 92(%esp) # 4-byte Spill
+ adcl 1096(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1100(%esp), %ecx
+ movl %ecx, 76(%esp) # 4-byte Spill
+ adcl 1104(%esp), %esi
+ movl %esi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1108(%esp), %ecx
+ movl %ecx, 80(%esp) # 4-byte Spill
+ adcl 1112(%esp), %edi
+ movl %edi, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %edi # 4-byte Reload
+ adcl 1116(%esp), %edi
+ movl 56(%esp), %ecx # 4-byte Reload
+ adcl 1120(%esp), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl %eax, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 984(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 984(%esp), %esi
+ movl 72(%esp), %esi # 4-byte Reload
+ adcl 988(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %ebp # 4-byte Reload
+ adcl 996(%esp), %ebp
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 1000(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1004(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 1044(%esp), %edi
+ movl %edi, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 44(%eax), %eax
+ movl %eax, (%esp)
+ leal 912(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 980(%esp), %eax
+ addl 912(%esp), %esi
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 916(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ adcl 920(%esp), %ebp
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edi # 4-byte Reload
+ adcl 924(%esp), %edi
+ movl 108(%esp), %edx # 4-byte Reload
+ adcl 928(%esp), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 932(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %edx # 4-byte Reload
+ adcl 936(%esp), %edx
+ movl %edx, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %edx # 4-byte Reload
+ adcl 940(%esp), %edx
+ movl %edx, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %ebp # 4-byte Reload
+ adcl 944(%esp), %ebp
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 948(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 952(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 956(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 960(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 964(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 968(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 972(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 976(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ adcl $0, %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl %esi, %eax
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 840(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 840(%esp), %esi
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl 852(%esp), %edi
+ movl %edi, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 856(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 860(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl 864(%esp), %edi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl %ebp, %esi
+ adcl 872(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %ebp # 4-byte Reload
+ adcl 888(%esp), %ebp
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 48(%eax), %eax
+ movl %eax, (%esp)
+ leal 768(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 836(%esp), %edx
+ movl 64(%esp), %ecx # 4-byte Reload
+ addl 768(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 780(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 784(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 788(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %edi # 4-byte Reload
+ adcl 792(%esp), %edi
+ adcl 796(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ adcl 812(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 828(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 696(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 696(%esp), %esi
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 712(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 716(%esp), %esi
+ adcl 720(%esp), %edi
+ movl %edi, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 732(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ adcl 756(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 760(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 52(%eax), %eax
+ movl %eax, (%esp)
+ leal 624(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 692(%esp), %edx
+ movl 88(%esp), %ecx # 4-byte Reload
+ addl 624(%esp), %ecx
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %ebp # 4-byte Reload
+ adcl 636(%esp), %ebp
+ adcl 640(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 644(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ adcl 656(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 660(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 672(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 676(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 684(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 552(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 552(%esp), %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl 564(%esp), %ebp
+ movl %ebp, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 568(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 572(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 576(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 588(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 596(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %ebp # 4-byte Reload
+ adcl 600(%esp), %ebp
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 604(%esp), %edi
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 56(%eax), %eax
+ movl %eax, (%esp)
+ leal 480(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 548(%esp), %edx
+ movl 96(%esp), %ecx # 4-byte Reload
+ addl 480(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 496(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 500(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 512(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 516(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ adcl 524(%esp), %ebp
+ movl %ebp, 52(%esp) # 4-byte Spill
+ adcl 528(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 408(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 408(%esp), %esi
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %esi # 4-byte Reload
+ adcl 420(%esp), %esi
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 424(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 428(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %ebp # 4-byte Reload
+ adcl 432(%esp), %ebp
+ movl 76(%esp), %edi # 4-byte Reload
+ adcl 436(%esp), %edi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 440(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 60(%eax), %eax
+ movl %eax, (%esp)
+ leal 336(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 404(%esp), %edx
+ movl 108(%esp), %ecx # 4-byte Reload
+ addl 336(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 344(%esp), %esi
+ movl %esi, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 352(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ adcl 356(%esp), %ebp
+ movl %ebp, 104(%esp) # 4-byte Spill
+ adcl 360(%esp), %edi
+ movl %edi, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edi # 4-byte Reload
+ adcl 364(%esp), %edi
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %ebp # 4-byte Reload
+ adcl 372(%esp), %ebp
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ adcl 384(%esp), %eax
+ movl %eax, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 48(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 264(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 264(%esp), %esi
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 276(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 92(%esp), %esi # 4-byte Reload
+ adcl 280(%esp), %esi
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 284(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ adcl 292(%esp), %edi
+ movl %edi, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ adcl 300(%esp), %ebp
+ movl %ebp, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edi # 4-byte Reload
+ adcl 308(%esp), %edi
+ movl 68(%esp), %ebp # 4-byte Reload
+ adcl 312(%esp), %ebp
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 2600(%esp), %eax
+ movl 64(%eax), %eax
+ movl %eax, (%esp)
+ leal 192(%esp), %ecx
+ movl 2596(%esp), %edx
+ calll .LmulPv544x32
+ movl 260(%esp), %edx
+ movl 100(%esp), %ecx # 4-byte Reload
+ addl 192(%esp), %ecx
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 196(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 200(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ adcl 204(%esp), %esi
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 104(%esp), %eax # 4-byte Reload
+ adcl 208(%esp), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 216(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 220(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ adcl 232(%esp), %edi
+ movl %edi, 56(%esp) # 4-byte Spill
+ adcl 236(%esp), %ebp
+ movl %ebp, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ adcl $0, %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %esi
+ movl %eax, (%esp)
+ leal 120(%esp), %ecx
+ movl 2604(%esp), %edx
+ calll .LmulPv544x32
+ addl 120(%esp), %esi
+ movl 92(%esp), %esi # 4-byte Reload
+ movl 116(%esp), %eax # 4-byte Reload
+ adcl 124(%esp), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 112(%esp), %ebp # 4-byte Reload
+ adcl 128(%esp), %ebp
+ movl %ebp, 112(%esp) # 4-byte Spill
+ adcl 132(%esp), %esi
+ movl 104(%esp), %edx # 4-byte Reload
+ adcl 136(%esp), %edx
+ movl %edx, 104(%esp) # 4-byte Spill
+ movl 76(%esp), %edx # 4-byte Reload
+ adcl 140(%esp), %edx
+ movl %edx, 76(%esp) # 4-byte Spill
+ movl 84(%esp), %edx # 4-byte Reload
+ adcl 144(%esp), %edx
+ movl %edx, 84(%esp) # 4-byte Spill
+ movl 80(%esp), %edx # 4-byte Reload
+ adcl 148(%esp), %edx
+ movl %edx, 80(%esp) # 4-byte Spill
+ movl 60(%esp), %edx # 4-byte Reload
+ adcl 152(%esp), %edx
+ movl %edx, 60(%esp) # 4-byte Spill
+ movl 52(%esp), %edx # 4-byte Reload
+ adcl 156(%esp), %edx
+ movl %edx, 52(%esp) # 4-byte Spill
+ movl 56(%esp), %edx # 4-byte Reload
+ adcl 160(%esp), %edx
+ movl %edx, 56(%esp) # 4-byte Spill
+ movl 68(%esp), %edx # 4-byte Reload
+ adcl 164(%esp), %edx
+ movl %edx, 68(%esp) # 4-byte Spill
+ movl 72(%esp), %edx # 4-byte Reload
+ adcl 168(%esp), %edx
+ movl %edx, 72(%esp) # 4-byte Spill
+ movl 64(%esp), %edx # 4-byte Reload
+ adcl 172(%esp), %edx
+ movl %edx, 64(%esp) # 4-byte Spill
+ movl 88(%esp), %edx # 4-byte Reload
+ adcl 176(%esp), %edx
+ movl %edx, 88(%esp) # 4-byte Spill
+ movl 96(%esp), %edx # 4-byte Reload
+ adcl 180(%esp), %edx
+ movl %edx, 96(%esp) # 4-byte Spill
+ movl 108(%esp), %edx # 4-byte Reload
+ adcl 184(%esp), %edx
+ movl %edx, 108(%esp) # 4-byte Spill
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 188(%esp), %edx
+ movl %edx, 100(%esp) # 4-byte Spill
+ movl %eax, %edx
+ movl 2604(%esp), %edi
+ subl (%edi), %edx
+ sbbl 4(%edi), %ebp
+ movl %esi, %ebx
+ sbbl 8(%edi), %ebx
+ movl 104(%esp), %ecx # 4-byte Reload
+ sbbl 12(%edi), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ sbbl 16(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ sbbl 20(%edi), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ sbbl 24(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 60(%esp), %eax # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 68(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 72(%esp), %eax # 4-byte Reload
+ sbbl 44(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 64(%esp), %eax # 4-byte Reload
+ sbbl 48(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ sbbl 52(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 96(%esp), %eax # 4-byte Reload
+ sbbl 56(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ sbbl 60(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 100(%esp), %eax # 4-byte Reload
+ sbbl 64(%edi), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ sarl $31, %eax
+ testl %eax, %eax
+ movl 116(%esp), %edi # 4-byte Reload
+ js .LBB259_2
+# BB#1:
+ movl %edx, %edi
+.LBB259_2:
+ movl 2592(%esp), %edx
+ movl %edi, (%edx)
+ movl 112(%esp), %edi # 4-byte Reload
+ js .LBB259_4
+# BB#3:
+ movl %ebp, %edi
+.LBB259_4:
+ movl %edi, 4(%edx)
+ js .LBB259_6
+# BB#5:
+ movl %ebx, %esi
+.LBB259_6:
+ movl %esi, 8(%edx)
+ movl 104(%esp), %esi # 4-byte Reload
+ js .LBB259_8
+# BB#7:
+ movl %ecx, %esi
+.LBB259_8:
+ movl %esi, 12(%edx)
+ movl 76(%esp), %ecx # 4-byte Reload
+ js .LBB259_10
+# BB#9:
+ movl 4(%esp), %ecx # 4-byte Reload
+.LBB259_10:
+ movl %ecx, 16(%edx)
+ movl 84(%esp), %eax # 4-byte Reload
+ js .LBB259_12
+# BB#11:
+ movl 8(%esp), %eax # 4-byte Reload
+.LBB259_12:
+ movl %eax, 20(%edx)
+ movl 80(%esp), %eax # 4-byte Reload
+ js .LBB259_14
+# BB#13:
+ movl 12(%esp), %eax # 4-byte Reload
+.LBB259_14:
+ movl %eax, 24(%edx)
+ movl 60(%esp), %eax # 4-byte Reload
+ js .LBB259_16
+# BB#15:
+ movl 16(%esp), %eax # 4-byte Reload
+.LBB259_16:
+ movl %eax, 28(%edx)
+ movl 52(%esp), %eax # 4-byte Reload
+ js .LBB259_18
+# BB#17:
+ movl 20(%esp), %eax # 4-byte Reload
+.LBB259_18:
+ movl %eax, 32(%edx)
+ movl 56(%esp), %eax # 4-byte Reload
+ js .LBB259_20
+# BB#19:
+ movl 24(%esp), %eax # 4-byte Reload
+.LBB259_20:
+ movl %eax, 36(%edx)
+ movl 68(%esp), %eax # 4-byte Reload
+ js .LBB259_22
+# BB#21:
+ movl 28(%esp), %eax # 4-byte Reload
+.LBB259_22:
+ movl %eax, 40(%edx)
+ movl 72(%esp), %eax # 4-byte Reload
+ js .LBB259_24
+# BB#23:
+ movl 32(%esp), %eax # 4-byte Reload
+.LBB259_24:
+ movl %eax, 44(%edx)
+ movl 64(%esp), %eax # 4-byte Reload
+ js .LBB259_26
+# BB#25:
+ movl 36(%esp), %eax # 4-byte Reload
+.LBB259_26:
+ movl %eax, 48(%edx)
+ movl 88(%esp), %eax # 4-byte Reload
+ js .LBB259_28
+# BB#27:
+ movl 40(%esp), %eax # 4-byte Reload
+.LBB259_28:
+ movl %eax, 52(%edx)
+ movl 96(%esp), %eax # 4-byte Reload
+ js .LBB259_30
+# BB#29:
+ movl 44(%esp), %eax # 4-byte Reload
+.LBB259_30:
+ movl %eax, 56(%edx)
+ movl 108(%esp), %eax # 4-byte Reload
+ js .LBB259_32
+# BB#31:
+ movl 48(%esp), %eax # 4-byte Reload
+.LBB259_32:
+ movl %eax, 60(%edx)
+ movl 100(%esp), %eax # 4-byte Reload
+ js .LBB259_34
+# BB#33:
+ movl 92(%esp), %eax # 4-byte Reload
+.LBB259_34:
+ movl %eax, 64(%edx)
+ addl $2572, %esp # imm = 0xA0C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end259:
+ .size mcl_fp_montNF17L, .Lfunc_end259-mcl_fp_montNF17L
+
+ .globl mcl_fp_montRed17L
+ .align 16, 0x90
+ .type mcl_fp_montRed17L,@function
+mcl_fp_montRed17L: # @mcl_fp_montRed17L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $1436, %esp # imm = 0x59C
+ calll .L260$pb
+.L260$pb:
+ popl %eax
+.Ltmp61:
+ addl $_GLOBAL_OFFSET_TABLE_+(.Ltmp61-.L260$pb), %eax
+ movl %eax, 104(%esp) # 4-byte Spill
+ movl 1464(%esp), %edx
+ movl -4(%edx), %esi
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 1460(%esp), %ecx
+ movl (%ecx), %ebx
+ movl %ebx, 76(%esp) # 4-byte Spill
+ movl 4(%ecx), %edi
+ movl %edi, 80(%esp) # 4-byte Spill
+ imull %esi, %ebx
+ movl 132(%ecx), %eax
+ movl %eax, 116(%esp) # 4-byte Spill
+ movl 128(%ecx), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 124(%ecx), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 120(%ecx), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 116(%ecx), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 112(%ecx), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 108(%ecx), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 104(%ecx), %esi
+ movl %esi, 156(%esp) # 4-byte Spill
+ movl 100(%ecx), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 96(%ecx), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 92(%ecx), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 88(%ecx), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 84(%ecx), %esi
+ movl %esi, 180(%esp) # 4-byte Spill
+ movl 80(%ecx), %edi
+ movl %edi, 196(%esp) # 4-byte Spill
+ movl 76(%ecx), %esi
+ movl %esi, 192(%esp) # 4-byte Spill
+ movl 72(%ecx), %esi
+ movl %esi, 204(%esp) # 4-byte Spill
+ movl 68(%ecx), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 64(%ecx), %ebp
+ movl %ebp, 176(%esp) # 4-byte Spill
+ movl 60(%ecx), %ebp
+ movl %ebp, 164(%esp) # 4-byte Spill
+ movl 56(%ecx), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 52(%ecx), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 48(%ecx), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 44(%ecx), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 40(%ecx), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 36(%ecx), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 32(%ecx), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 28(%ecx), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 24(%ecx), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 20(%ecx), %ebp
+ movl 16(%ecx), %esi
+ movl 12(%ecx), %edi
+ movl 8(%ecx), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ movl (%edx), %ecx
+ movl %ecx, 16(%esp) # 4-byte Spill
+ movl 64(%edx), %ecx
+ movl %ecx, 68(%esp) # 4-byte Spill
+ movl 60(%edx), %ecx
+ movl %ecx, 64(%esp) # 4-byte Spill
+ movl 56(%edx), %ecx
+ movl %ecx, 60(%esp) # 4-byte Spill
+ movl 52(%edx), %ecx
+ movl %ecx, 56(%esp) # 4-byte Spill
+ movl 48(%edx), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ movl 44(%edx), %ecx
+ movl %ecx, 48(%esp) # 4-byte Spill
+ movl 40(%edx), %ecx
+ movl %ecx, 44(%esp) # 4-byte Spill
+ movl 36(%edx), %ecx
+ movl %ecx, 40(%esp) # 4-byte Spill
+ movl 32(%edx), %ecx
+ movl %ecx, 36(%esp) # 4-byte Spill
+ movl 28(%edx), %ecx
+ movl %ecx, 32(%esp) # 4-byte Spill
+ movl 24(%edx), %ecx
+ movl %ecx, 28(%esp) # 4-byte Spill
+ movl 20(%edx), %ecx
+ movl %ecx, 24(%esp) # 4-byte Spill
+ movl 16(%edx), %ecx
+ movl %ecx, 20(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl %ecx, 12(%esp) # 4-byte Spill
+ movl 8(%edx), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 4(%edx), %ecx
+ movl %ecx, 4(%esp) # 4-byte Spill
+ movl %ebx, (%esp)
+ leal 1360(%esp), %ecx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ movl 76(%esp), %eax # 4-byte Reload
+ addl 1360(%esp), %eax
+ movl 80(%esp), %ecx # 4-byte Reload
+ adcl 1364(%esp), %ecx
+ movl 100(%esp), %eax # 4-byte Reload
+ adcl 1368(%esp), %eax
+ movl %eax, 100(%esp) # 4-byte Spill
+ adcl 1372(%esp), %edi
+ movl %edi, 72(%esp) # 4-byte Spill
+ adcl 1376(%esp), %esi
+ movl %esi, 76(%esp) # 4-byte Spill
+ adcl 1380(%esp), %ebp
+ movl %ebp, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1384(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1388(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1392(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1396(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1400(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1404(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1408(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1412(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 1416(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1420(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1424(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1428(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ adcl $0, 204(%esp) # 4-byte Folded Spill
+ adcl $0, 192(%esp) # 4-byte Folded Spill
+ adcl $0, 196(%esp) # 4-byte Folded Spill
+ adcl $0, 180(%esp) # 4-byte Folded Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ movl 128(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ sbbl %edi, %edi
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1288(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ andl $1, %edi
+ movl %edi, %ecx
+ addl 1288(%esp), %esi
+ movl 100(%esp), %edx # 4-byte Reload
+ adcl 1292(%esp), %edx
+ movl 72(%esp), %eax # 4-byte Reload
+ adcl 1296(%esp), %eax
+ movl %eax, 72(%esp) # 4-byte Spill
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1300(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1304(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1308(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1312(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1316(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1320(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1324(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1328(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1332(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl 1336(%esp), %edi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 1340(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1344(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1348(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1352(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1356(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ adcl $0, 192(%esp) # 4-byte Folded Spill
+ adcl $0, 196(%esp) # 4-byte Folded Spill
+ adcl $0, 180(%esp) # 4-byte Folded Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl $0, %eax
+ movl %eax, %esi
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 128(%esp) # 4-byte Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %ecx
+ movl %ecx, 100(%esp) # 4-byte Spill
+ movl %edx, %ebp
+ movl %ebp, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1216(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 1216(%esp), %ebp
+ movl 72(%esp), %ecx # 4-byte Reload
+ adcl 1220(%esp), %ecx
+ movl 76(%esp), %eax # 4-byte Reload
+ adcl 1224(%esp), %eax
+ movl %eax, 76(%esp) # 4-byte Spill
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1228(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1232(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1236(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1240(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1244(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1248(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1252(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1256(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ adcl 1260(%esp), %edi
+ movl %edi, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %edi # 4-byte Reload
+ adcl 1264(%esp), %edi
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1268(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1272(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1276(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1280(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1284(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ adcl $0, 196(%esp) # 4-byte Folded Spill
+ adcl $0, 180(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 184(%esp) # 4-byte Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ movl 144(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1144(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 1144(%esp), %esi
+ movl 76(%esp), %ecx # 4-byte Reload
+ adcl 1148(%esp), %ecx
+ movl 80(%esp), %eax # 4-byte Reload
+ adcl 1152(%esp), %eax
+ movl %eax, 80(%esp) # 4-byte Spill
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1156(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1160(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1164(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1168(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1172(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1176(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1180(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1184(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ adcl 1188(%esp), %edi
+ movl %edi, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1192(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1196(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1200(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1204(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1208(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 1212(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ adcl $0, 180(%esp) # 4-byte Folded Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ movl 188(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 144(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1072(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 1072(%esp), %esi
+ movl 80(%esp), %esi # 4-byte Reload
+ adcl 1076(%esp), %esi
+ movl 84(%esp), %eax # 4-byte Reload
+ adcl 1080(%esp), %eax
+ movl %eax, 84(%esp) # 4-byte Spill
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1084(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1088(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1092(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1096(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1100(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1104(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1108(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 1112(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1116(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1120(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1124(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1128(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1132(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 1136(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1140(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ adcl $0, 184(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 188(%esp) # 4-byte Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ movl 172(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ movl 152(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 1000(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 1000(%esp), %esi
+ movl 84(%esp), %ecx # 4-byte Reload
+ adcl 1004(%esp), %ecx
+ movl 88(%esp), %eax # 4-byte Reload
+ adcl 1008(%esp), %eax
+ movl %eax, 88(%esp) # 4-byte Spill
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 1012(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 1016(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 1020(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 1024(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 1028(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 1032(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 1036(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 1040(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 1044(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 1048(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 1052(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 1056(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 1060(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 1064(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 1068(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ adcl $0, 188(%esp) # 4-byte Folded Spill
+ adcl $0, 168(%esp) # 4-byte Folded Spill
+ adcl $0, %ebp
+ movl %ebp, 172(%esp) # 4-byte Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 152(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 928(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 928(%esp), %esi
+ movl 88(%esp), %esi # 4-byte Reload
+ adcl 932(%esp), %esi
+ movl 92(%esp), %eax # 4-byte Reload
+ adcl 936(%esp), %eax
+ movl %eax, 92(%esp) # 4-byte Spill
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 940(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 944(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 948(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 952(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 956(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 960(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 964(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 968(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 972(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 976(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 980(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 984(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 988(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 992(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 996(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %ebp # 4-byte Reload
+ adcl $0, %ebp
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ movl 160(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %esi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 856(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 856(%esp), %esi
+ movl 92(%esp), %ecx # 4-byte Reload
+ adcl 860(%esp), %ecx
+ movl 108(%esp), %eax # 4-byte Reload
+ adcl 864(%esp), %eax
+ movl %eax, 108(%esp) # 4-byte Spill
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 868(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 872(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 876(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 880(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 884(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 888(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 892(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 896(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 900(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 904(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 908(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 912(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 916(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 920(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ adcl 924(%esp), %ebp
+ movl %ebp, 168(%esp) # 4-byte Spill
+ adcl $0, 172(%esp) # 4-byte Folded Spill
+ adcl $0, 156(%esp) # 4-byte Folded Spill
+ adcl $0, %edi
+ movl %edi, 160(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ movl 124(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ movl 96(%esp), %ebp # 4-byte Reload
+ imull %ebp, %eax
+ movl %eax, (%esp)
+ leal 784(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 784(%esp), %edi
+ movl 108(%esp), %ecx # 4-byte Reload
+ adcl 788(%esp), %ecx
+ movl 112(%esp), %eax # 4-byte Reload
+ adcl 792(%esp), %eax
+ movl %eax, 112(%esp) # 4-byte Spill
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 796(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 800(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 804(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 808(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 812(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 816(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 820(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 824(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 828(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 832(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 836(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 840(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 844(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 848(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 852(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 124(%esp) # 4-byte Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull %ebp, %eax
+ movl %eax, (%esp)
+ leal 712(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 712(%esp), %esi
+ movl 112(%esp), %ecx # 4-byte Reload
+ adcl 716(%esp), %ecx
+ movl 120(%esp), %eax # 4-byte Reload
+ adcl 720(%esp), %eax
+ movl %eax, 120(%esp) # 4-byte Spill
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 724(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 728(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 732(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 736(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 740(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 744(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 748(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 752(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 756(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %ebp # 4-byte Reload
+ adcl 760(%esp), %ebp
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 764(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 768(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 772(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 776(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ adcl 780(%esp), %edi
+ movl %edi, 156(%esp) # 4-byte Spill
+ adcl $0, 160(%esp) # 4-byte Folded Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %esi
+ movl %esi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 640(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 640(%esp), %esi
+ movl 120(%esp), %ecx # 4-byte Reload
+ adcl 644(%esp), %ecx
+ movl 140(%esp), %eax # 4-byte Reload
+ adcl 648(%esp), %eax
+ movl %eax, 140(%esp) # 4-byte Spill
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 652(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 656(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 660(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 664(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 668(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %edi # 4-byte Reload
+ adcl 672(%esp), %edi
+ movl 192(%esp), %esi # 4-byte Reload
+ adcl 676(%esp), %esi
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 680(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ adcl 684(%esp), %ebp
+ movl %ebp, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 688(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 692(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 696(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 700(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 704(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 708(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ adcl $0, 152(%esp) # 4-byte Folded Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %ebp
+ movl %ebp, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 568(%esp), %ecx
+ movl 1464(%esp), %eax
+ movl %eax, %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 568(%esp), %ebp
+ movl 140(%esp), %ecx # 4-byte Reload
+ adcl 572(%esp), %ecx
+ movl 136(%esp), %eax # 4-byte Reload
+ adcl 576(%esp), %eax
+ movl %eax, 136(%esp) # 4-byte Spill
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 580(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 584(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ movl 176(%esp), %ebp # 4-byte Reload
+ adcl 588(%esp), %ebp
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 592(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ adcl 596(%esp), %edi
+ movl %edi, 204(%esp) # 4-byte Spill
+ adcl 600(%esp), %esi
+ movl %esi, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %esi # 4-byte Reload
+ adcl 604(%esp), %esi
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 608(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 612(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 616(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 620(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 624(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 628(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 632(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 636(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ adcl $0, 144(%esp) # 4-byte Folded Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, 100(%esp) # 4-byte Folded Spill
+ movl %ecx, %edi
+ movl %edi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 496(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 496(%esp), %edi
+ movl 136(%esp), %edi # 4-byte Reload
+ adcl 500(%esp), %edi
+ movl 148(%esp), %eax # 4-byte Reload
+ adcl 504(%esp), %eax
+ movl %eax, 148(%esp) # 4-byte Spill
+ movl 164(%esp), %eax # 4-byte Reload
+ adcl 508(%esp), %eax
+ movl %eax, 164(%esp) # 4-byte Spill
+ adcl 512(%esp), %ebp
+ movl %ebp, 176(%esp) # 4-byte Spill
+ movl 200(%esp), %ebp # 4-byte Reload
+ adcl 516(%esp), %ebp
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 520(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 524(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ adcl 528(%esp), %esi
+ movl %esi, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 532(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 536(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 540(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 544(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 548(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 552(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 556(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 560(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 564(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ adcl $0, 128(%esp) # 4-byte Folded Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl %edi, %eax
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 424(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 424(%esp), %edi
+ movl 148(%esp), %ecx # 4-byte Reload
+ adcl 428(%esp), %ecx
+ movl 164(%esp), %edi # 4-byte Reload
+ adcl 432(%esp), %edi
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 436(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ adcl 440(%esp), %ebp
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 444(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 448(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 452(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 456(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 460(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 464(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 468(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 472(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 476(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 480(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 484(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 488(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 492(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ adcl $0, 132(%esp) # 4-byte Folded Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ adcl $0, %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl %ecx, %eax
+ movl %ecx, %esi
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 352(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 352(%esp), %esi
+ movl %edi, %ecx
+ adcl 356(%esp), %ecx
+ movl 176(%esp), %eax # 4-byte Reload
+ adcl 360(%esp), %eax
+ movl %eax, 176(%esp) # 4-byte Spill
+ adcl 364(%esp), %ebp
+ movl %ebp, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 368(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 372(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 376(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 380(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl 184(%esp), %edi # 4-byte Reload
+ adcl 384(%esp), %edi
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 388(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 392(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 396(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 400(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 404(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 408(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 412(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 416(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 420(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ adcl $0, 124(%esp) # 4-byte Folded Spill
+ adcl $0, 116(%esp) # 4-byte Folded Spill
+ movl 100(%esp), %esi # 4-byte Reload
+ adcl $0, %esi
+ movl %ecx, %eax
+ movl %ecx, %ebp
+ imull 96(%esp), %eax # 4-byte Folded Reload
+ movl %eax, (%esp)
+ leal 280(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 280(%esp), %ebp
+ movl 176(%esp), %ecx # 4-byte Reload
+ adcl 284(%esp), %ecx
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 288(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %eax # 4-byte Reload
+ adcl 292(%esp), %eax
+ movl %eax, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %eax # 4-byte Reload
+ adcl 296(%esp), %eax
+ movl %eax, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 300(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 304(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ adcl 308(%esp), %edi
+ movl %edi, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 312(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 316(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 320(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 324(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 328(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 332(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 336(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %eax # 4-byte Reload
+ adcl 340(%esp), %eax
+ movl %eax, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 344(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 348(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ movl 116(%esp), %edi # 4-byte Reload
+ adcl $0, %edi
+ adcl $0, %esi
+ movl 96(%esp), %eax # 4-byte Reload
+ imull %ecx, %eax
+ movl %ecx, %ebp
+ movl %eax, (%esp)
+ leal 208(%esp), %ecx
+ movl 1464(%esp), %edx
+ movl 104(%esp), %ebx # 4-byte Reload
+ calll .LmulPv544x32
+ addl 208(%esp), %ebp
+ movl 200(%esp), %eax # 4-byte Reload
+ adcl 212(%esp), %eax
+ movl %eax, 200(%esp) # 4-byte Spill
+ movl 204(%esp), %edx # 4-byte Reload
+ adcl 216(%esp), %edx
+ movl %edx, 204(%esp) # 4-byte Spill
+ movl 192(%esp), %ecx # 4-byte Reload
+ adcl 220(%esp), %ecx
+ movl %ecx, 192(%esp) # 4-byte Spill
+ movl 196(%esp), %eax # 4-byte Reload
+ adcl 224(%esp), %eax
+ movl %eax, 196(%esp) # 4-byte Spill
+ movl 180(%esp), %eax # 4-byte Reload
+ adcl 228(%esp), %eax
+ movl %eax, 180(%esp) # 4-byte Spill
+ movl %eax, %ebp
+ movl 184(%esp), %eax # 4-byte Reload
+ adcl 232(%esp), %eax
+ movl %eax, 184(%esp) # 4-byte Spill
+ movl 188(%esp), %eax # 4-byte Reload
+ adcl 236(%esp), %eax
+ movl %eax, 188(%esp) # 4-byte Spill
+ movl 168(%esp), %eax # 4-byte Reload
+ adcl 240(%esp), %eax
+ movl %eax, 168(%esp) # 4-byte Spill
+ movl 172(%esp), %eax # 4-byte Reload
+ adcl 244(%esp), %eax
+ movl %eax, 172(%esp) # 4-byte Spill
+ movl 156(%esp), %eax # 4-byte Reload
+ adcl 248(%esp), %eax
+ movl %eax, 156(%esp) # 4-byte Spill
+ movl 160(%esp), %eax # 4-byte Reload
+ adcl 252(%esp), %eax
+ movl %eax, 160(%esp) # 4-byte Spill
+ movl 152(%esp), %eax # 4-byte Reload
+ adcl 256(%esp), %eax
+ movl %eax, 152(%esp) # 4-byte Spill
+ movl 144(%esp), %eax # 4-byte Reload
+ adcl 260(%esp), %eax
+ movl %eax, 144(%esp) # 4-byte Spill
+ movl 128(%esp), %ebx # 4-byte Reload
+ adcl 264(%esp), %ebx
+ movl %ebx, 128(%esp) # 4-byte Spill
+ movl 132(%esp), %eax # 4-byte Reload
+ adcl 268(%esp), %eax
+ movl %eax, 132(%esp) # 4-byte Spill
+ movl 124(%esp), %eax # 4-byte Reload
+ adcl 272(%esp), %eax
+ movl %eax, 124(%esp) # 4-byte Spill
+ adcl 276(%esp), %edi
+ movl %edi, 116(%esp) # 4-byte Spill
+ adcl $0, %esi
+ movl %esi, 100(%esp) # 4-byte Spill
+ movl 200(%esp), %edi # 4-byte Reload
+ subl 16(%esp), %edi # 4-byte Folded Reload
+ sbbl 4(%esp), %edx # 4-byte Folded Reload
+ sbbl 8(%esp), %ecx # 4-byte Folded Reload
+ movl 196(%esp), %eax # 4-byte Reload
+ sbbl 12(%esp), %eax # 4-byte Folded Reload
+ sbbl 20(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 88(%esp) # 4-byte Spill
+ movl 184(%esp), %esi # 4-byte Reload
+ sbbl 24(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 92(%esp) # 4-byte Spill
+ movl 188(%esp), %esi # 4-byte Reload
+ sbbl 28(%esp), %esi # 4-byte Folded Reload
+ movl %esi, 96(%esp) # 4-byte Spill
+ movl 168(%esp), %esi # 4-byte Reload
+ sbbl 32(%esp), %esi # 4-byte Folded Reload
+ movl 172(%esp), %ebp # 4-byte Reload
+ sbbl 36(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 104(%esp) # 4-byte Spill
+ movl 156(%esp), %ebp # 4-byte Reload
+ sbbl 40(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 108(%esp) # 4-byte Spill
+ movl 160(%esp), %ebp # 4-byte Reload
+ sbbl 44(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 112(%esp) # 4-byte Spill
+ movl 152(%esp), %ebp # 4-byte Reload
+ sbbl 48(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 120(%esp) # 4-byte Spill
+ movl 144(%esp), %ebp # 4-byte Reload
+ sbbl 52(%esp), %ebp # 4-byte Folded Reload
+ movl %ebp, 136(%esp) # 4-byte Spill
+ sbbl 56(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 140(%esp) # 4-byte Spill
+ movl 132(%esp), %ebx # 4-byte Reload
+ sbbl 60(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 148(%esp) # 4-byte Spill
+ movl 124(%esp), %ebx # 4-byte Reload
+ sbbl 64(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 164(%esp) # 4-byte Spill
+ movl 116(%esp), %ebx # 4-byte Reload
+ sbbl 68(%esp), %ebx # 4-byte Folded Reload
+ movl %ebx, 176(%esp) # 4-byte Spill
+ movl 100(%esp), %ebx # 4-byte Reload
+ sbbl $0, %ebx
+ andl $1, %ebx
+ jne .LBB260_2
+# BB#1:
+ movl %esi, 168(%esp) # 4-byte Spill
+.LBB260_2:
+ testb %bl, %bl
+ movl 200(%esp), %esi # 4-byte Reload
+ jne .LBB260_4
+# BB#3:
+ movl %edi, %esi
+.LBB260_4:
+ movl 1456(%esp), %edi
+ movl %esi, (%edi)
+ movl 156(%esp), %esi # 4-byte Reload
+ movl 204(%esp), %ebx # 4-byte Reload
+ jne .LBB260_6
+# BB#5:
+ movl %edx, %ebx
+.LBB260_6:
+ movl %ebx, 4(%edi)
+ movl 144(%esp), %ebx # 4-byte Reload
+ movl 192(%esp), %edx # 4-byte Reload
+ jne .LBB260_8
+# BB#7:
+ movl %ecx, %edx
+.LBB260_8:
+ movl %edx, 8(%edi)
+ movl 132(%esp), %edx # 4-byte Reload
+ movl 196(%esp), %ecx # 4-byte Reload
+ jne .LBB260_10
+# BB#9:
+ movl %eax, %ecx
+.LBB260_10:
+ movl %ecx, 12(%edi)
+ movl 124(%esp), %ecx # 4-byte Reload
+ movl 180(%esp), %eax # 4-byte Reload
+ jne .LBB260_12
+# BB#11:
+ movl 88(%esp), %eax # 4-byte Reload
+.LBB260_12:
+ movl %eax, 16(%edi)
+ movl 188(%esp), %eax # 4-byte Reload
+ movl 184(%esp), %ebp # 4-byte Reload
+ jne .LBB260_14
+# BB#13:
+ movl 92(%esp), %ebp # 4-byte Reload
+.LBB260_14:
+ movl %ebp, 20(%edi)
+ movl 152(%esp), %ebp # 4-byte Reload
+ jne .LBB260_16
+# BB#15:
+ movl 96(%esp), %eax # 4-byte Reload
+.LBB260_16:
+ movl %eax, 24(%edi)
+ movl 168(%esp), %eax # 4-byte Reload
+ movl %eax, 28(%edi)
+ jne .LBB260_18
+# BB#17:
+ movl 104(%esp), %eax # 4-byte Reload
+ movl %eax, 172(%esp) # 4-byte Spill
+.LBB260_18:
+ movl 172(%esp), %eax # 4-byte Reload
+ movl %eax, 32(%edi)
+ jne .LBB260_20
+# BB#19:
+ movl 108(%esp), %esi # 4-byte Reload
+.LBB260_20:
+ movl %esi, 36(%edi)
+ jne .LBB260_22
+# BB#21:
+ movl 112(%esp), %eax # 4-byte Reload
+ movl %eax, 160(%esp) # 4-byte Spill
+.LBB260_22:
+ movl 160(%esp), %esi # 4-byte Reload
+ movl %esi, 40(%edi)
+ movl 128(%esp), %eax # 4-byte Reload
+ jne .LBB260_24
+# BB#23:
+ movl 120(%esp), %ebp # 4-byte Reload
+.LBB260_24:
+ movl %ebp, 44(%edi)
+ jne .LBB260_26
+# BB#25:
+ movl 136(%esp), %ebx # 4-byte Reload
+.LBB260_26:
+ movl %ebx, 48(%edi)
+ jne .LBB260_28
+# BB#27:
+ movl 140(%esp), %eax # 4-byte Reload
+.LBB260_28:
+ movl %eax, 52(%edi)
+ jne .LBB260_30
+# BB#29:
+ movl 148(%esp), %edx # 4-byte Reload
+.LBB260_30:
+ movl %edx, 56(%edi)
+ movl 116(%esp), %eax # 4-byte Reload
+ jne .LBB260_32
+# BB#31:
+ movl 164(%esp), %ecx # 4-byte Reload
+.LBB260_32:
+ movl %ecx, 60(%edi)
+ jne .LBB260_34
+# BB#33:
+ movl 176(%esp), %eax # 4-byte Reload
+.LBB260_34:
+ movl %eax, 64(%edi)
+ addl $1436, %esp # imm = 0x59C
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end260:
+ .size mcl_fp_montRed17L, .Lfunc_end260-mcl_fp_montRed17L
+
+ .globl mcl_fp_addPre17L
+ .align 16, 0x90
+ .type mcl_fp_addPre17L,@function
+#-----------------------------------------------------------------------
+# uint32_t mcl_fp_addPre17L(uint32_t* z, const uint32_t* x, const uint32_t* y)
+# 17-limb (544-bit) multi-precision addition without modular reduction:
+#   z[0..16] = x[0..16] + y[0..16]  (limbs little-endian), returns carry.
+# ABI:   i386 cdecl; after the 3 pushes the args sit at
+#        16(%esp)=z, 20(%esp)=x, 24(%esp)=y.
+# Out:   %eax = carry out of the most-significant limb (0 or 1).
+# Note:  the adcl chain must stay unbroken: only flag-preserving movl
+#        instructions are interleaved between the adds.
+# NOTE(review): compiler-generated (LLVM) code from the mcl library.
+#-----------------------------------------------------------------------
+mcl_fp_addPre17L: # @mcl_fp_addPre17L
+# BB#0:
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %eax # %eax = y
+ movl (%eax), %edx
+ movl 4(%eax), %esi
+ movl 20(%esp), %ecx # %ecx = x
+ addl (%ecx), %edx # limb 0 starts the carry chain
+ adcl 4(%ecx), %esi
+ movl 8(%eax), %ebx
+ adcl 8(%ecx), %ebx
+ movl 16(%esp), %edi # %edi = z (result)
+ movl %edx, (%edi)
+ movl 12(%ecx), %edx
+ movl %esi, 4(%edi)
+ movl 16(%ecx), %esi
+ adcl 12(%eax), %edx
+ adcl 16(%eax), %esi
+ movl %ebx, 8(%edi)
+ movl 20(%eax), %ebx
+ movl %edx, 12(%edi)
+ movl 20(%ecx), %edx
+ adcl %ebx, %edx
+ movl 24(%eax), %ebx
+ movl %esi, 16(%edi)
+ movl 24(%ecx), %esi
+ adcl %ebx, %esi
+ movl 28(%eax), %ebx
+ movl %edx, 20(%edi)
+ movl 28(%ecx), %edx
+ adcl %ebx, %edx
+ movl 32(%eax), %ebx
+ movl %esi, 24(%edi)
+ movl 32(%ecx), %esi
+ adcl %ebx, %esi
+ movl 36(%eax), %ebx
+ movl %edx, 28(%edi)
+ movl 36(%ecx), %edx
+ adcl %ebx, %edx
+ movl 40(%eax), %ebx
+ movl %esi, 32(%edi)
+ movl 40(%ecx), %esi
+ adcl %ebx, %esi
+ movl 44(%eax), %ebx
+ movl %edx, 36(%edi)
+ movl 44(%ecx), %edx
+ adcl %ebx, %edx
+ movl 48(%eax), %ebx
+ movl %esi, 40(%edi)
+ movl 48(%ecx), %esi
+ adcl %ebx, %esi
+ movl 52(%eax), %ebx
+ movl %edx, 44(%edi)
+ movl 52(%ecx), %edx
+ adcl %ebx, %edx
+ movl 56(%eax), %ebx
+ movl %esi, 48(%edi)
+ movl 56(%ecx), %esi
+ adcl %ebx, %esi
+ movl 60(%eax), %ebx
+ movl %edx, 52(%edi)
+ movl 60(%ecx), %edx
+ adcl %ebx, %edx
+ movl %esi, 56(%edi)
+ movl %edx, 60(%edi)
+ movl 64(%eax), %eax
+ movl 64(%ecx), %ecx
+ adcl %eax, %ecx # last (17th) limb
+ movl %ecx, 64(%edi)
+ sbbl %eax, %eax # %eax = -CF (0 or -1) from the final carry
+ andl $1, %eax # normalize carry to 0/1 for the return value
+ popl %esi
+ popl %edi
+ popl %ebx
+ retl
+.Lfunc_end261:
+ .size mcl_fp_addPre17L, .Lfunc_end261-mcl_fp_addPre17L
+
+ .globl mcl_fp_subPre17L
+ .align 16, 0x90
+ .type mcl_fp_subPre17L,@function
+#-----------------------------------------------------------------------
+# uint32_t mcl_fp_subPre17L(uint32_t* z, const uint32_t* x, const uint32_t* y)
+# 17-limb (544-bit) multi-precision subtraction without reduction:
+#   z[0..16] = x[0..16] - y[0..16]  (limbs little-endian), returns borrow.
+# ABI:   i386 cdecl; after the 4 pushes the args sit at
+#        20(%esp)=z, 24(%esp)=x, 28(%esp)=y.
+# Out:   %eax = final borrow (0 or 1).
+# Note:  the sbbl chain must stay unbroken: only flag-preserving movl
+#        instructions are interleaved between the subtracts.
+# NOTE(review): compiler-generated (LLVM) code from the mcl library.
+#-----------------------------------------------------------------------
+mcl_fp_subPre17L: # @mcl_fp_subPre17L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ movl 24(%esp), %ecx # %ecx = x
+ movl (%ecx), %esi
+ movl 4(%ecx), %edi
+ xorl %eax, %eax # zero the return reg (CF is also cleared)
+ movl 28(%esp), %edx # %edx = y
+ subl (%edx), %esi # limb 0 starts the borrow chain
+ sbbl 4(%edx), %edi
+ movl 8(%ecx), %ebp
+ sbbl 8(%edx), %ebp
+ movl 20(%esp), %ebx # %ebx = z (result)
+ movl %esi, (%ebx)
+ movl 12(%ecx), %esi
+ sbbl 12(%edx), %esi
+ movl %edi, 4(%ebx)
+ movl 16(%ecx), %edi
+ sbbl 16(%edx), %edi
+ movl %ebp, 8(%ebx)
+ movl 20(%edx), %ebp
+ movl %esi, 12(%ebx)
+ movl 20(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 24(%edx), %ebp
+ movl %edi, 16(%ebx)
+ movl 24(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 28(%edx), %ebp
+ movl %esi, 20(%ebx)
+ movl 28(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 32(%edx), %ebp
+ movl %edi, 24(%ebx)
+ movl 32(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 36(%edx), %ebp
+ movl %esi, 28(%ebx)
+ movl 36(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 40(%edx), %ebp
+ movl %edi, 32(%ebx)
+ movl 40(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 44(%edx), %ebp
+ movl %esi, 36(%ebx)
+ movl 44(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 48(%edx), %ebp
+ movl %edi, 40(%ebx)
+ movl 48(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 52(%edx), %ebp
+ movl %esi, 44(%ebx)
+ movl 52(%ecx), %esi
+ sbbl %ebp, %esi
+ movl 56(%edx), %ebp
+ movl %edi, 48(%ebx)
+ movl 56(%ecx), %edi
+ sbbl %ebp, %edi
+ movl 60(%edx), %ebp
+ movl %esi, 52(%ebx)
+ movl 60(%ecx), %esi
+ sbbl %ebp, %esi
+ movl %edi, 56(%ebx)
+ movl %esi, 60(%ebx)
+ movl 64(%edx), %edx
+ movl 64(%ecx), %ecx
+ sbbl %edx, %ecx # last (17th) limb
+ movl %ecx, 64(%ebx)
+ sbbl $0, %eax # %eax = -CF (0 or -1) from the final borrow
+ andl $1, %eax # normalize borrow to 0/1 for the return value
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end262:
+ .size mcl_fp_subPre17L, .Lfunc_end262-mcl_fp_subPre17L
+
+ .globl mcl_fp_shr1_17L
+ .align 16, 0x90
+ .type mcl_fp_shr1_17L,@function
+#-----------------------------------------------------------------------
+# void mcl_fp_shr1_17L(uint32_t* z, const uint32_t* x)
+# Logical right shift by one bit of a 17-limb (544-bit) little-endian
+# integer: z = x >> 1.  The 16 low result limbs each take their top bit
+# from the next-higher source limb via shrdl; the final limb gets a zero
+# shifted in with a plain shrl.
+# ABI:   i386 cdecl; after the 2 pushes the args sit at
+#        12(%esp)=z, 16(%esp)=x.
+# Note:  in-place use (z == x) is safe: each destination limb is written
+#        only after the two source limbs it depends on are in registers.
+#-----------------------------------------------------------------------
+mcl_fp_shr1_17L: # @mcl_fp_shr1_17L
+# BB#0:
+ pushl %edi
+ pushl %esi
+ movl 16(%esp), %eax # %eax = x (walking source pointer)
+ movl 12(%esp), %ecx # %ecx = z (walking destination pointer)
+ movl (%eax), %edx # %edx = current limb
+ movl $16, %edi # 16 limbs borrow a bit from their neighbour
+.LBB263_1: # low-limb loop: z[i] = (x[i+1]:x[i]) >> 1
+ movl 4(%eax), %esi # %esi = next-higher source limb
+ shrdl $1, %esi, %edx # %edx = low 32 bits of (next:cur) >> 1
+ movl %edx, (%ecx)
+ movl %esi, %edx # the next limb becomes the current one
+ addl $4, %eax
+ addl $4, %ecx
+ decl %edi
+ jnz .LBB263_1
+ shrl %edx # top limb: shift a zero bit in
+ movl %edx, (%ecx) # %ecx now points at z[16]
+ popl %esi
+ popl %edi
+ retl
+.Lfunc_end263:
+ .size mcl_fp_shr1_17L, .Lfunc_end263-mcl_fp_shr1_17L
+
+ .globl mcl_fp_add17L
+ .align 16, 0x90
+ .type mcl_fp_add17L,@function
+#-----------------------------------------------------------------------
+# void mcl_fp_add17L(uint32_t* z, const uint32_t* x, const uint32_t* y,
+#                    const uint32_t* p)
+# Modular addition over a 17-limb (544-bit) field: z = (x + y) mod p.
+# Strategy: store t = x + y into z, then compute the trial value t - p;
+# if that subtraction did not underflow (with the addition's carry taken
+# into account), overwrite z with t - p, otherwise keep the sum t that
+# was already stored.
+# ABI:   i386 cdecl; after 4 pushes + subl $60 the args sit at
+#        80(%esp)=z, 84(%esp)=x, 88(%esp)=y, 92(%esp)=p.
+# The "# 4-byte Spill"/"# 4-byte Reload" annotations are LLVM's: limbs
+# that do not fit in registers live in the 60-byte local stack area.
+# NOTE(review): compiler-generated (LLVM) code from the mcl library.
+#-----------------------------------------------------------------------
+mcl_fp_add17L: # @mcl_fp_add17L
+# BB#0:
+ pushl %ebp
+ pushl %ebx
+ pushl %edi
+ pushl %esi
+ subl $60, %esp
+# --- Phase 1: t = x + y, one unbroken adcl carry chain.  t[0] stays in
+# --- %ebx, t[4] in %edi, t[15] in %ebp, t[16] in %edx; the rest spill.
+ movl 88(%esp), %esi # %esi = y
+ movl (%esi), %ecx
+ movl 4(%esi), %eax
+ movl 84(%esp), %edx # %edx = x
+ addl (%edx), %ecx
+ movl %ecx, %ebx # %ebx = t[0]
+ adcl 4(%edx), %eax
+ movl %eax, 8(%esp) # 4-byte Spill
+ movl 8(%esi), %eax
+ adcl 8(%edx), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 12(%edx), %ecx
+ movl 16(%edx), %edi
+ adcl 12(%esi), %ecx
+ movl %ecx, 52(%esp) # 4-byte Spill
+ adcl 16(%esi), %edi
+ movl %edi, 4(%esp) # 4-byte Spill
+ movl 20(%edx), %eax
+ adcl 20(%esi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 24(%edx), %eax
+ adcl 24(%esi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 28(%edx), %eax
+ adcl 28(%esi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 32(%edx), %eax
+ adcl 32(%esi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 36(%edx), %eax
+ adcl 36(%esi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 40(%edx), %eax
+ adcl 40(%esi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 44(%edx), %eax
+ adcl 44(%esi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 48(%edx), %eax
+ adcl 48(%esi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 52(%edx), %eax
+ adcl 52(%esi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 56(%edx), %eax
+ adcl 56(%esi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ movl 60(%edx), %ebp
+ adcl 60(%esi), %ebp
+ movl 64(%edx), %edx
+ adcl 64(%esi), %edx
+# --- Phase 2: store t into z (carry flag still pending in CF).
+ movl 80(%esp), %esi # %esi = z
+ movl %ebx, (%esi)
+ movl %ebx, %eax
+ movl 8(%esp), %ecx # 4-byte Reload
+ movl %ecx, 4(%esi)
+ movl 56(%esp), %ebx # 4-byte Reload
+ movl %ebx, 8(%esi)
+ movl 52(%esp), %ebx # 4-byte Reload
+ movl %ebx, 12(%esi)
+ movl %edi, 16(%esi)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%esi)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%esi)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%esi)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%esi)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%esi)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 40(%esi)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 44(%esi)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 48(%esi)
+ movl 16(%esp), %edi # 4-byte Reload
+ movl %edi, 52(%esi)
+ movl 12(%esp), %edi # 4-byte Reload
+ movl %edi, 56(%esi)
+ movl %ebp, 60(%esi)
+ movl %edx, 64(%esi)
+# --- Phase 3: capture the addition's carry in %ebx, then trial-subtract
+# --- p from t with an unbroken sbbl chain (results into the spill slots).
+ sbbl %ebx, %ebx # %ebx = -carry(t)
+ andl $1, %ebx # %ebx = carry(t), 0 or 1
+ movl 92(%esp), %edi # %edi = p
+ subl (%edi), %eax
+ movl %eax, (%esp) # 4-byte Spill
+ sbbl 4(%edi), %ecx
+ movl %ecx, 8(%esp) # 4-byte Spill
+ movl 56(%esp), %eax # 4-byte Reload
+ sbbl 8(%edi), %eax
+ movl %eax, 56(%esp) # 4-byte Spill
+ movl 52(%esp), %eax # 4-byte Reload
+ sbbl 12(%edi), %eax
+ movl %eax, 52(%esp) # 4-byte Spill
+ movl 4(%esp), %eax # 4-byte Reload
+ sbbl 16(%edi), %eax
+ movl %eax, 4(%esp) # 4-byte Spill
+ movl 48(%esp), %eax # 4-byte Reload
+ sbbl 20(%edi), %eax
+ movl %eax, 48(%esp) # 4-byte Spill
+ movl 44(%esp), %eax # 4-byte Reload
+ sbbl 24(%edi), %eax
+ movl %eax, 44(%esp) # 4-byte Spill
+ movl 40(%esp), %eax # 4-byte Reload
+ sbbl 28(%edi), %eax
+ movl %eax, 40(%esp) # 4-byte Spill
+ movl 36(%esp), %eax # 4-byte Reload
+ sbbl 32(%edi), %eax
+ movl %eax, 36(%esp) # 4-byte Spill
+ movl 32(%esp), %eax # 4-byte Reload
+ sbbl 36(%edi), %eax
+ movl %eax, 32(%esp) # 4-byte Spill
+ movl 28(%esp), %eax # 4-byte Reload
+ sbbl 40(%edi), %eax
+ movl %eax, 28(%esp) # 4-byte Spill
+ movl 24(%esp), %eax # 4-byte Reload
+ sbbl 44(%edi), %eax
+ movl %eax, 24(%esp) # 4-byte Spill
+ movl 20(%esp), %eax # 4-byte Reload
+ sbbl 48(%edi), %eax
+ movl %eax, 20(%esp) # 4-byte Spill
+ movl 16(%esp), %eax # 4-byte Reload
+ sbbl 52(%edi), %eax
+ movl %eax, 16(%esp) # 4-byte Spill
+ movl 12(%esp), %eax # 4-byte Reload
+ sbbl 56(%edi), %eax
+ movl %eax, 12(%esp) # 4-byte Spill
+ sbbl 60(%edi), %ebp
+ sbbl 64(%edi), %edx
+# --- Phase 4: %ebx = carry(t) - borrow(t - p); low bit set means the
+# --- trial subtraction underflowed overall, so the stored sum t stands.
+ sbbl $0, %ebx
+ testb $1, %bl
+ jne .LBB264_2 # keep the unreduced sum already stored in z
+# BB#1: # %nocarry
+ movl (%esp), %edi # 4-byte Reload
+ movl %edi, (%esi)
+ movl 8(%esp), %edi # 4-byte Reload
+ movl %edi, 4(%esi)
+ movl 56(%esp), %edi # 4-byte Reload
+ movl %edi, 8(%esi)
+ movl 52(%esp), %edi # 4-byte Reload
+ movl %edi, 12(%esi)
+ movl 4(%esp), %edi # 4-byte Reload
+ movl %edi, 16(%esi)
+ movl 48(%esp), %edi # 4-byte Reload
+ movl %edi, 20(%esi)
+ movl 44(%esp), %edi # 4-byte Reload
+ movl %edi, 24(%esi)
+ movl 40(%esp), %edi # 4-byte Reload
+ movl %edi, 28(%esi)
+ movl 36(%esp), %edi # 4-byte Reload
+ movl %edi, 32(%esi)
+ movl 32(%esp), %edi # 4-byte Reload
+ movl %edi, 36(%esi)
+ movl 28(%esp), %edi # 4-byte Reload
+ movl %edi, 40(%esi)
+ movl 24(%esp), %edi # 4-byte Reload
+ movl %edi, 44(%esi)
+ movl 20(%esp), %edi # 4-byte Reload
+ movl %edi, 48(%esi)
+ movl 16(%esp), %ecx # 4-byte Reload
+ movl %ecx, 52(%esi)
+ movl 12(%esp), %ecx # 4-byte Reload
+ movl %ecx, 56(%esi)
+ movl %ebp, 60(%esi)
+ movl %edx, 64(%esi)
+.LBB264_2: # %carry
+ addl $60, %esp
+ popl %esi
+ popl %edi
+ popl %ebx
+ popl %ebp
+ retl
+.Lfunc_end264:
+ .size mcl_fp_add17L, .Lfunc_end264-mcl_fp_add17L
+
+	.globl	mcl_fp_addNF17L
+	.align	16, 0x90
+	.type	mcl_fp_addNF17L,@function
+mcl_fp_addNF17L:                        # @mcl_fp_addNF17L
+# BB#0:
+	# Modular addition of 17x32-bit (544-bit) little-endian limb arrays,
+	# "NF" variant (result selected branchily on the sign of the trial
+	# subtraction rather than on a saved carry flag).
+	# i386 cdecl; stack args after the 4 pushes + subl $132 prologue:
+	#   152(%esp) = output limb array z[17]
+	#   156(%esp), 160(%esp) = input limb arrays (the two addends)
+	#   164(%esp) = modulus limb array p[17]
+	# Computes t = a + b (full 17-limb carry chain), then s = t - p
+	# (full borrow chain, both kept in spill slots), then stores per limb
+	# either t or s depending on the sign of the top borrow word.
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$132, %esp
+	# ---- phase 1: t = a + b, limb by limb via addl/adcl chain;
+	#      sums are spilled to 64..128(%esp) ----
+	movl	160(%esp), %eax
+	movl	(%eax), %ecx
+	movl	4(%eax), %edx
+	movl	156(%esp), %esi
+	addl	(%esi), %ecx
+	movl	%ecx, 84(%esp)          # 4-byte Spill
+	adcl	4(%esi), %edx
+	movl	%edx, 88(%esp)          # 4-byte Spill
+	movl	64(%eax), %ecx
+	movl	%ecx, 100(%esp)         # 4-byte Spill
+	movl	60(%eax), %ecx
+	movl	%ecx, 96(%esp)          # 4-byte Spill
+	movl	56(%eax), %ecx
+	movl	%ecx, 92(%esp)          # 4-byte Spill
+	movl	52(%eax), %ebp
+	movl	48(%eax), %ecx
+	movl	%ecx, 116(%esp)         # 4-byte Spill
+	movl	44(%eax), %ecx
+	movl	%ecx, 112(%esp)         # 4-byte Spill
+	movl	40(%eax), %ecx
+	movl	%ecx, 128(%esp)         # 4-byte Spill
+	movl	36(%eax), %ecx
+	movl	%ecx, 108(%esp)         # 4-byte Spill
+	movl	32(%eax), %ecx
+	movl	%ecx, 124(%esp)         # 4-byte Spill
+	movl	28(%eax), %ecx
+	movl	%ecx, 104(%esp)         # 4-byte Spill
+	movl	24(%eax), %ecx
+	movl	%ecx, 120(%esp)         # 4-byte Spill
+	movl	20(%eax), %ebx
+	movl	16(%eax), %edi
+	movl	12(%eax), %edx
+	movl	8(%eax), %ecx
+	adcl	8(%esi), %ecx
+	movl	%ecx, 64(%esp)          # 4-byte Spill
+	adcl	12(%esi), %edx
+	movl	%edx, 68(%esp)          # 4-byte Spill
+	adcl	16(%esi), %edi
+	movl	%edi, 72(%esp)          # 4-byte Spill
+	adcl	20(%esi), %ebx
+	movl	%ebx, 76(%esp)          # 4-byte Spill
+	movl	120(%esp), %eax         # 4-byte Reload
+	adcl	24(%esi), %eax
+	movl	%eax, 120(%esp)         # 4-byte Spill
+	movl	104(%esp), %eax         # 4-byte Reload
+	adcl	28(%esi), %eax
+	movl	%eax, 104(%esp)         # 4-byte Spill
+	movl	124(%esp), %eax         # 4-byte Reload
+	adcl	32(%esi), %eax
+	movl	%eax, 124(%esp)         # 4-byte Spill
+	movl	108(%esp), %eax         # 4-byte Reload
+	adcl	36(%esi), %eax
+	movl	%eax, 108(%esp)         # 4-byte Spill
+	movl	128(%esp), %eax         # 4-byte Reload
+	adcl	40(%esi), %eax
+	movl	%eax, 128(%esp)         # 4-byte Spill
+	movl	112(%esp), %eax         # 4-byte Reload
+	adcl	44(%esi), %eax
+	movl	%eax, 112(%esp)         # 4-byte Spill
+	movl	116(%esp), %eax         # 4-byte Reload
+	adcl	48(%esi), %eax
+	movl	%eax, 116(%esp)         # 4-byte Spill
+	adcl	52(%esi), %ebp
+	movl	%ebp, 80(%esp)          # 4-byte Spill
+	movl	92(%esp), %ebp          # 4-byte Reload
+	adcl	56(%esi), %ebp
+	movl	%ebp, 92(%esp)          # 4-byte Spill
+	movl	96(%esp), %ebp          # 4-byte Reload
+	adcl	60(%esi), %ebp
+	movl	%ebp, 96(%esp)          # 4-byte Spill
+	movl	100(%esp), %ebp         # 4-byte Reload
+	adcl	64(%esi), %ebp
+	movl	%ebp, 100(%esp)         # 4-byte Spill
+	# ---- phase 2: trial subtraction s = t - p via subl/sbbl chain;
+	#      differences are spilled to 0..60(%esp) ----
+	movl	164(%esp), %esi
+	movl	84(%esp), %eax          # 4-byte Reload
+	subl	(%esi), %eax
+	movl	%eax, (%esp)            # 4-byte Spill
+	movl	88(%esp), %eax          # 4-byte Reload
+	sbbl	4(%esi), %eax
+	movl	%eax, 4(%esp)           # 4-byte Spill
+	sbbl	8(%esi), %ecx
+	movl	%ecx, 8(%esp)           # 4-byte Spill
+	sbbl	12(%esi), %edx
+	sbbl	16(%esi), %edi
+	movl	%edi, 12(%esp)          # 4-byte Spill
+	sbbl	20(%esi), %ebx
+	movl	%ebx, 16(%esp)          # 4-byte Spill
+	movl	120(%esp), %ebx         # 4-byte Reload
+	sbbl	24(%esi), %ebx
+	movl	%ebx, 20(%esp)          # 4-byte Spill
+	movl	104(%esp), %eax         # 4-byte Reload
+	sbbl	28(%esi), %eax
+	movl	%eax, 24(%esp)          # 4-byte Spill
+	movl	124(%esp), %eax         # 4-byte Reload
+	sbbl	32(%esi), %eax
+	movl	%eax, 28(%esp)          # 4-byte Spill
+	movl	108(%esp), %eax         # 4-byte Reload
+	sbbl	36(%esi), %eax
+	movl	%eax, 32(%esp)          # 4-byte Spill
+	movl	128(%esp), %ecx         # 4-byte Reload
+	sbbl	40(%esi), %ecx
+	movl	%ecx, 36(%esp)          # 4-byte Spill
+	movl	112(%esp), %ecx         # 4-byte Reload
+	sbbl	44(%esi), %ecx
+	movl	%ecx, 40(%esp)          # 4-byte Spill
+	movl	116(%esp), %ecx         # 4-byte Reload
+	sbbl	48(%esi), %ecx
+	movl	%ecx, 44(%esp)          # 4-byte Spill
+	movl	80(%esp), %eax          # 4-byte Reload
+	movl	%eax, %ecx
+	sbbl	52(%esi), %ecx
+	movl	%ecx, 48(%esp)          # 4-byte Spill
+	movl	92(%esp), %eax          # 4-byte Reload
+	movl	%eax, %ecx
+	movl	%eax, %ebp
+	sbbl	56(%esi), %ecx
+	movl	%ecx, 52(%esp)          # 4-byte Spill
+	movl	96(%esp), %eax          # 4-byte Reload
+	movl	%eax, %ecx
+	sbbl	60(%esi), %ecx
+	movl	%ecx, 56(%esp)          # 4-byte Spill
+	movl	100(%esp), %eax         # 4-byte Reload
+	movl	%eax, %ebx
+	sbbl	64(%esi), %ebx
+	movl	%ebx, 60(%esp)          # 4-byte Spill
+	# ---- phase 3: sign of the top difference word decides the result;
+	#      SF set (negative => t < p) selects t, otherwise selects s.
+	#      Each js/fallthrough pair below picks one limb and stores it. ----
+	movl	%ebx, %esi
+	sarl	$31, %esi
+	testl	%esi, %esi
+	movl	84(%esp), %esi          # 4-byte Reload
+	js	.LBB265_2
+# BB#1:
+	movl	(%esp), %esi            # 4-byte Reload
+.LBB265_2:
+	movl	152(%esp), %ebx
+	movl	%esi, (%ebx)
+	movl	88(%esp), %eax          # 4-byte Reload
+	js	.LBB265_4
+# BB#3:
+	movl	4(%esp), %eax           # 4-byte Reload
+.LBB265_4:
+	movl	%eax, 4(%ebx)
+	movl	108(%esp), %eax         # 4-byte Reload
+	movl	76(%esp), %esi          # 4-byte Reload
+	movl	64(%esp), %edi          # 4-byte Reload
+	js	.LBB265_6
+# BB#5:
+	movl	8(%esp), %edi           # 4-byte Reload
+.LBB265_6:
+	movl	%edi, 8(%ebx)
+	movl	116(%esp), %edi         # 4-byte Reload
+	movl	68(%esp), %ecx          # 4-byte Reload
+	js	.LBB265_8
+# BB#7:
+	movl	%edx, %ecx
+.LBB265_8:
+	movl	%ecx, 12(%ebx)
+	movl	104(%esp), %ecx         # 4-byte Reload
+	movl	72(%esp), %edx          # 4-byte Reload
+	js	.LBB265_10
+# BB#9:
+	movl	12(%esp), %edx          # 4-byte Reload
+.LBB265_10:
+	movl	%edx, 16(%ebx)
+	movl	%ebp, %edx
+	js	.LBB265_12
+# BB#11:
+	movl	16(%esp), %esi          # 4-byte Reload
+.LBB265_12:
+	movl	%esi, 20(%ebx)
+	movl	112(%esp), %ebp         # 4-byte Reload
+	js	.LBB265_14
+# BB#13:
+	movl	20(%esp), %esi          # 4-byte Reload
+	movl	%esi, 120(%esp)         # 4-byte Spill
+.LBB265_14:
+	movl	120(%esp), %esi         # 4-byte Reload
+	movl	%esi, 24(%ebx)
+	js	.LBB265_16
+# BB#15:
+	movl	24(%esp), %ecx          # 4-byte Reload
+.LBB265_16:
+	movl	%ecx, 28(%ebx)
+	js	.LBB265_18
+# BB#17:
+	movl	28(%esp), %ecx          # 4-byte Reload
+	movl	%ecx, 124(%esp)         # 4-byte Spill
+.LBB265_18:
+	movl	124(%esp), %ecx         # 4-byte Reload
+	movl	%ecx, 32(%ebx)
+	js	.LBB265_20
+# BB#19:
+	movl	32(%esp), %eax          # 4-byte Reload
+.LBB265_20:
+	movl	%eax, 36(%ebx)
+	movl	100(%esp), %ecx         # 4-byte Reload
+	js	.LBB265_22
+# BB#21:
+	movl	36(%esp), %eax          # 4-byte Reload
+	movl	%eax, 128(%esp)         # 4-byte Spill
+.LBB265_22:
+	movl	128(%esp), %eax         # 4-byte Reload
+	movl	%eax, 40(%ebx)
+	js	.LBB265_24
+# BB#23:
+	movl	40(%esp), %ebp          # 4-byte Reload
+.LBB265_24:
+	movl	%ebp, 44(%ebx)
+	js	.LBB265_26
+# BB#25:
+	movl	44(%esp), %edi          # 4-byte Reload
+.LBB265_26:
+	movl	%edi, 48(%ebx)
+	movl	80(%esp), %eax          # 4-byte Reload
+	js	.LBB265_28
+# BB#27:
+	movl	48(%esp), %eax          # 4-byte Reload
+.LBB265_28:
+	movl	%eax, 52(%ebx)
+	js	.LBB265_30
+# BB#29:
+	movl	52(%esp), %edx          # 4-byte Reload
+.LBB265_30:
+	movl	%edx, 56(%ebx)
+	movl	96(%esp), %eax          # 4-byte Reload
+	js	.LBB265_32
+# BB#31:
+	movl	56(%esp), %eax          # 4-byte Reload
+.LBB265_32:
+	movl	%eax, 60(%ebx)
+	js	.LBB265_34
+# BB#33:
+	movl	60(%esp), %ecx          # 4-byte Reload
+.LBB265_34:
+	movl	%ecx, 64(%ebx)
+	addl	$132, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end265:
+	.size	mcl_fp_addNF17L, .Lfunc_end265-mcl_fp_addNF17L
+
+	.globl	mcl_fp_sub17L
+	.align	16, 0x90
+	.type	mcl_fp_sub17L,@function
+mcl_fp_sub17L:                          # @mcl_fp_sub17L
+# BB#0:
+	# Modular subtraction of 17x32-bit (544-bit) limb arrays.
+	# i386 cdecl; stack args after the 4 pushes + subl $64 prologue:
+	#   84(%esp) = output limb array z[17]
+	#   88(%esp) = minuend, 92(%esp) = subtrahend
+	#   96(%esp) = modulus limb array p[17]
+	# Computes d = a - b with a full borrow chain and stores d to z
+	# unconditionally; if the chain borrowed out (d went negative),
+	# the %carry path re-adds p in place.
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$64, %esp
+	# ---- d = a - b; %ebx starts at 0 and captures the final borrow ----
+	movl	88(%esp), %esi
+	movl	(%esi), %eax
+	movl	4(%esi), %ecx
+	xorl	%ebx, %ebx
+	movl	92(%esp), %edi
+	subl	(%edi), %eax
+	movl	%eax, 56(%esp)          # 4-byte Spill
+	sbbl	4(%edi), %ecx
+	movl	%ecx, 16(%esp)          # 4-byte Spill
+	movl	8(%esi), %eax
+	sbbl	8(%edi), %eax
+	movl	%eax, 48(%esp)          # 4-byte Spill
+	movl	12(%esi), %eax
+	sbbl	12(%edi), %eax
+	movl	%eax, 60(%esp)          # 4-byte Spill
+	movl	16(%esi), %eax
+	sbbl	16(%edi), %eax
+	movl	%eax, 52(%esp)          # 4-byte Spill
+	movl	20(%esi), %eax
+	sbbl	20(%edi), %eax
+	movl	%eax, 44(%esp)          # 4-byte Spill
+	movl	24(%esi), %eax
+	sbbl	24(%edi), %eax
+	movl	%eax, 40(%esp)          # 4-byte Spill
+	movl	28(%esi), %eax
+	sbbl	28(%edi), %eax
+	movl	%eax, 36(%esp)          # 4-byte Spill
+	movl	32(%esi), %eax
+	sbbl	32(%edi), %eax
+	movl	%eax, 32(%esp)          # 4-byte Spill
+	movl	36(%esi), %eax
+	sbbl	36(%edi), %eax
+	movl	%eax, 28(%esp)          # 4-byte Spill
+	movl	40(%esi), %eax
+	sbbl	40(%edi), %eax
+	movl	%eax, 24(%esp)          # 4-byte Spill
+	movl	44(%esi), %eax
+	sbbl	44(%edi), %eax
+	movl	%eax, 20(%esp)          # 4-byte Spill
+	movl	48(%esi), %edx
+	sbbl	48(%edi), %edx
+	movl	%edx, 12(%esp)          # 4-byte Spill
+	movl	52(%esi), %ecx
+	sbbl	52(%edi), %ecx
+	movl	%ecx, 8(%esp)           # 4-byte Spill
+	movl	56(%esi), %eax
+	sbbl	56(%edi), %eax
+	movl	%eax, 4(%esp)           # 4-byte Spill
+	movl	60(%esi), %ebp
+	sbbl	60(%edi), %ebp
+	movl	64(%esi), %esi
+	sbbl	64(%edi), %esi
+	sbbl	$0, %ebx                # %ebx = 0 (no borrow) or -1 (borrowed out)
+	testb	$1, %bl
+	# ---- store the raw difference to z unconditionally ----
+	movl	84(%esp), %ebx
+	movl	56(%esp), %edi          # 4-byte Reload
+	movl	%edi, (%ebx)
+	movl	16(%esp), %edi          # 4-byte Reload
+	movl	%edi, 4(%ebx)
+	movl	48(%esp), %edi          # 4-byte Reload
+	movl	%edi, 8(%ebx)
+	movl	60(%esp), %edi          # 4-byte Reload
+	movl	%edi, 12(%ebx)
+	movl	52(%esp), %edi          # 4-byte Reload
+	movl	%edi, 16(%ebx)
+	movl	44(%esp), %edi          # 4-byte Reload
+	movl	%edi, 20(%ebx)
+	movl	40(%esp), %edi          # 4-byte Reload
+	movl	%edi, 24(%ebx)
+	movl	36(%esp), %edi          # 4-byte Reload
+	movl	%edi, 28(%ebx)
+	movl	32(%esp), %edi          # 4-byte Reload
+	movl	%edi, 32(%ebx)
+	movl	28(%esp), %edi          # 4-byte Reload
+	movl	%edi, 36(%ebx)
+	movl	24(%esp), %edi          # 4-byte Reload
+	movl	%edi, 40(%ebx)
+	movl	20(%esp), %edi          # 4-byte Reload
+	movl	%edi, 44(%ebx)
+	movl	%edx, 48(%ebx)
+	movl	%ecx, 52(%ebx)
+	movl	%eax, 56(%ebx)
+	movl	%ebp, 60(%ebx)
+	movl	%esi, 64(%ebx)
+	je	.LBB266_2
+# BB#1:                                 # %carry
+	# Borrowed out: difference is negative, so add the modulus
+	# (96(%esp)) back into z limb by limb with an addl/adcl chain.
+	movl	%esi, (%esp)            # 4-byte Spill
+	movl	96(%esp), %esi
+	movl	56(%esp), %ecx          # 4-byte Reload
+	addl	(%esi), %ecx
+	movl	%ecx, (%ebx)
+	movl	16(%esp), %edx          # 4-byte Reload
+	adcl	4(%esi), %edx
+	movl	%edx, 4(%ebx)
+	movl	48(%esp), %edi          # 4-byte Reload
+	adcl	8(%esi), %edi
+	movl	12(%esi), %eax
+	adcl	60(%esp), %eax          # 4-byte Folded Reload
+	movl	%edi, 8(%ebx)
+	movl	16(%esi), %ecx
+	adcl	52(%esp), %ecx          # 4-byte Folded Reload
+	movl	%eax, 12(%ebx)
+	movl	20(%esi), %eax
+	adcl	44(%esp), %eax          # 4-byte Folded Reload
+	movl	%ecx, 16(%ebx)
+	movl	24(%esi), %ecx
+	adcl	40(%esp), %ecx          # 4-byte Folded Reload
+	movl	%eax, 20(%ebx)
+	movl	28(%esi), %eax
+	adcl	36(%esp), %eax          # 4-byte Folded Reload
+	movl	%ecx, 24(%ebx)
+	movl	32(%esi), %ecx
+	adcl	32(%esp), %ecx          # 4-byte Folded Reload
+	movl	%eax, 28(%ebx)
+	movl	36(%esi), %eax
+	adcl	28(%esp), %eax          # 4-byte Folded Reload
+	movl	%ecx, 32(%ebx)
+	movl	40(%esi), %ecx
+	adcl	24(%esp), %ecx          # 4-byte Folded Reload
+	movl	%eax, 36(%ebx)
+	movl	44(%esi), %eax
+	adcl	20(%esp), %eax          # 4-byte Folded Reload
+	movl	%ecx, 40(%ebx)
+	movl	48(%esi), %ecx
+	adcl	12(%esp), %ecx          # 4-byte Folded Reload
+	movl	%eax, 44(%ebx)
+	movl	52(%esi), %eax
+	adcl	8(%esp), %eax           # 4-byte Folded Reload
+	movl	%ecx, 48(%ebx)
+	movl	56(%esi), %ecx
+	adcl	4(%esp), %ecx           # 4-byte Folded Reload
+	movl	%eax, 52(%ebx)
+	movl	%ecx, 56(%ebx)
+	movl	60(%esi), %eax
+	adcl	%ebp, %eax
+	movl	%eax, 60(%ebx)
+	movl	64(%esi), %eax
+	adcl	(%esp), %eax            # 4-byte Folded Reload
+	movl	%eax, 64(%ebx)
+.LBB266_2:                              # %nocarry
+	addl	$64, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end266:
+	.size	mcl_fp_sub17L, .Lfunc_end266-mcl_fp_sub17L
+
+	.globl	mcl_fp_subNF17L
+	.align	16, 0x90
+	.type	mcl_fp_subNF17L,@function
+mcl_fp_subNF17L:                        # @mcl_fp_subNF17L
+# BB#0:
+	# Modular subtraction of 17x32-bit (544-bit) limb arrays, branchless
+	# "NF" variant: d = a - b, mask = sign-extend(top word of d), then
+	# z = d + (p & mask), i.e. p is added back only when d went negative.
+	# i386 cdecl; stack args after the 4 pushes + subl $112 prologue:
+	#   132(%esp) = output limb array z[17]
+	#   136(%esp) = minuend, 140(%esp) = subtrahend
+	#   144(%esp) = modulus limb array p[17]
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$112, %esp
+	# ---- phase 1: d = a - b via subl/sbbl chain; spilled to 44..108(%esp) ----
+	movl	136(%esp), %ecx
+	movl	(%ecx), %esi
+	movl	4(%ecx), %edx
+	movl	140(%esp), %edi
+	subl	(%edi), %esi
+	movl	%esi, 68(%esp)          # 4-byte Spill
+	sbbl	4(%edi), %edx
+	movl	%edx, 72(%esp)          # 4-byte Spill
+	movl	64(%ecx), %eax
+	movl	%eax, 84(%esp)          # 4-byte Spill
+	movl	60(%ecx), %edx
+	movl	%edx, 104(%esp)         # 4-byte Spill
+	movl	56(%ecx), %edx
+	movl	%edx, 100(%esp)         # 4-byte Spill
+	movl	52(%ecx), %edx
+	movl	%edx, 96(%esp)          # 4-byte Spill
+	movl	48(%ecx), %edx
+	movl	%edx, 92(%esp)          # 4-byte Spill
+	movl	44(%ecx), %edx
+	movl	%edx, 88(%esp)          # 4-byte Spill
+	movl	40(%ecx), %esi
+	movl	%esi, 108(%esp)         # 4-byte Spill
+	movl	36(%ecx), %edx
+	movl	%edx, 80(%esp)          # 4-byte Spill
+	movl	32(%ecx), %eax
+	movl	%eax, 76(%esp)          # 4-byte Spill
+	movl	28(%ecx), %ebp
+	movl	24(%ecx), %ebx
+	movl	20(%ecx), %esi
+	movl	16(%ecx), %edx
+	movl	12(%ecx), %eax
+	movl	8(%ecx), %ecx
+	sbbl	8(%edi), %ecx
+	movl	%ecx, 44(%esp)          # 4-byte Spill
+	sbbl	12(%edi), %eax
+	movl	%eax, 48(%esp)          # 4-byte Spill
+	sbbl	16(%edi), %edx
+	movl	%edx, 52(%esp)          # 4-byte Spill
+	sbbl	20(%edi), %esi
+	movl	%esi, 56(%esp)          # 4-byte Spill
+	sbbl	24(%edi), %ebx
+	movl	%ebx, 60(%esp)          # 4-byte Spill
+	sbbl	28(%edi), %ebp
+	movl	%ebp, 64(%esp)          # 4-byte Spill
+	movl	76(%esp), %eax          # 4-byte Reload
+	sbbl	32(%edi), %eax
+	movl	%eax, 76(%esp)          # 4-byte Spill
+	movl	80(%esp), %ecx          # 4-byte Reload
+	sbbl	36(%edi), %ecx
+	movl	%ecx, 80(%esp)          # 4-byte Spill
+	movl	108(%esp), %ecx         # 4-byte Reload
+	sbbl	40(%edi), %ecx
+	movl	%ecx, 108(%esp)         # 4-byte Spill
+	movl	88(%esp), %ecx          # 4-byte Reload
+	sbbl	44(%edi), %ecx
+	movl	%ecx, 88(%esp)          # 4-byte Spill
+	movl	92(%esp), %ecx          # 4-byte Reload
+	sbbl	48(%edi), %ecx
+	movl	%ecx, 92(%esp)          # 4-byte Spill
+	movl	96(%esp), %ecx          # 4-byte Reload
+	sbbl	52(%edi), %ecx
+	movl	%ecx, 96(%esp)          # 4-byte Spill
+	movl	100(%esp), %ecx         # 4-byte Reload
+	sbbl	56(%edi), %ecx
+	movl	%ecx, 100(%esp)         # 4-byte Spill
+	movl	104(%esp), %ecx         # 4-byte Reload
+	sbbl	60(%edi), %ecx
+	movl	%ecx, 104(%esp)         # 4-byte Spill
+	movl	84(%esp), %ecx          # 4-byte Reload
+	sbbl	64(%edi), %ecx
+	movl	%ecx, 84(%esp)          # 4-byte Spill
+	# ---- phase 2: build the all-ones/all-zeros mask from the sign of
+	#      the top difference word, then mask each modulus limb ----
+	movl	%ecx, %ebx
+	sarl	$31, %ebx               # %ebx = 0 or -1 (sign mask)
+	movl	%ebx, %edx
+	shldl	$1, %ecx, %edx          # %edx = (mask<<1)|(top>>31) == same mask
+	movl	144(%esp), %eax
+	movl	28(%eax), %ecx
+	andl	%edx, %ecx
+	movl	%ecx, 28(%esp)          # 4-byte Spill
+	movl	12(%eax), %ecx
+	andl	%edx, %ecx
+	movl	%ecx, 16(%esp)          # 4-byte Spill
+	movl	4(%eax), %ecx
+	andl	%edx, %ecx
+	movl	%ecx, %esi
+	andl	(%eax), %edx
+	movl	64(%eax), %ecx
+	andl	%ebx, %ecx
+	movl	%ecx, 40(%esp)          # 4-byte Spill
+	movl	60(%eax), %ecx
+	andl	%ebx, %ecx
+	movl	%ecx, 36(%esp)          # 4-byte Spill
+	roll	%ebx                    # rotate of 0 or -1 leaves the mask unchanged
+	movl	56(%eax), %ecx
+	andl	%ebx, %ecx
+	movl	%ecx, 32(%esp)          # 4-byte Spill
+	movl	52(%eax), %ecx
+	andl	%ebx, %ecx
+	movl	%ecx, 24(%esp)          # 4-byte Spill
+	movl	48(%eax), %ecx
+	andl	%ebx, %ecx
+	movl	%ecx, 20(%esp)          # 4-byte Spill
+	movl	44(%eax), %ecx
+	andl	%ebx, %ecx
+	movl	%ecx, 12(%esp)          # 4-byte Spill
+	movl	40(%eax), %ecx
+	andl	%ebx, %ecx
+	movl	%ecx, 8(%esp)           # 4-byte Spill
+	movl	36(%eax), %ecx
+	andl	%ebx, %ecx
+	movl	%ecx, 4(%esp)           # 4-byte Spill
+	movl	32(%eax), %ecx
+	andl	%ebx, %ecx
+	movl	%ecx, (%esp)            # 4-byte Spill
+	movl	24(%eax), %ebp
+	andl	%ebx, %ebp
+	movl	20(%eax), %edi
+	andl	%ebx, %edi
+	movl	16(%eax), %ecx
+	andl	%ebx, %ecx
+	andl	8(%eax), %ebx
+	# ---- phase 3: z = d + (p & mask), addl/adcl chain, stores interleaved ----
+	addl	68(%esp), %edx          # 4-byte Folded Reload
+	movl	%esi, %eax
+	adcl	72(%esp), %eax          # 4-byte Folded Reload
+	movl	132(%esp), %esi
+	movl	%edx, (%esi)
+	adcl	44(%esp), %ebx          # 4-byte Folded Reload
+	movl	%eax, 4(%esi)
+	movl	16(%esp), %eax          # 4-byte Reload
+	adcl	48(%esp), %eax          # 4-byte Folded Reload
+	movl	%ebx, 8(%esi)
+	adcl	52(%esp), %ecx          # 4-byte Folded Reload
+	movl	%eax, 12(%esi)
+	adcl	56(%esp), %edi          # 4-byte Folded Reload
+	movl	%ecx, 16(%esi)
+	adcl	60(%esp), %ebp          # 4-byte Folded Reload
+	movl	%edi, 20(%esi)
+	movl	28(%esp), %eax          # 4-byte Reload
+	adcl	64(%esp), %eax          # 4-byte Folded Reload
+	movl	%ebp, 24(%esi)
+	movl	(%esp), %ecx            # 4-byte Reload
+	adcl	76(%esp), %ecx          # 4-byte Folded Reload
+	movl	%eax, 28(%esi)
+	movl	4(%esp), %eax           # 4-byte Reload
+	adcl	80(%esp), %eax          # 4-byte Folded Reload
+	movl	%ecx, 32(%esi)
+	movl	8(%esp), %ecx           # 4-byte Reload
+	adcl	108(%esp), %ecx         # 4-byte Folded Reload
+	movl	%eax, 36(%esi)
+	movl	12(%esp), %eax          # 4-byte Reload
+	adcl	88(%esp), %eax          # 4-byte Folded Reload
+	movl	%ecx, 40(%esi)
+	movl	20(%esp), %ecx          # 4-byte Reload
+	adcl	92(%esp), %ecx          # 4-byte Folded Reload
+	movl	%eax, 44(%esi)
+	movl	24(%esp), %eax          # 4-byte Reload
+	adcl	96(%esp), %eax          # 4-byte Folded Reload
+	movl	%ecx, 48(%esi)
+	movl	32(%esp), %ecx          # 4-byte Reload
+	adcl	100(%esp), %ecx         # 4-byte Folded Reload
+	movl	%eax, 52(%esi)
+	movl	36(%esp), %eax          # 4-byte Reload
+	adcl	104(%esp), %eax         # 4-byte Folded Reload
+	movl	%ecx, 56(%esi)
+	movl	%eax, 60(%esi)
+	movl	40(%esp), %eax          # 4-byte Reload
+	adcl	84(%esp), %eax          # 4-byte Folded Reload
+	movl	%eax, 64(%esi)
+	addl	$112, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end267:
+	.size	mcl_fp_subNF17L, .Lfunc_end267-mcl_fp_subNF17L
+
+	.globl	mcl_fpDbl_add17L
+	.align	16, 0x90
+	.type	mcl_fpDbl_add17L,@function
+mcl_fpDbl_add17L:                       # @mcl_fpDbl_add17L
+# BB#0:
+	# Double-width addition: adds two 34-limb (2x544-bit) numbers.
+	# i386 cdecl; stack args after the 4 pushes + subl $128 prologue:
+	#   148(%esp) = output limb array z[34]
+	#   152(%esp), 156(%esp) = input limb arrays (the two addends)
+	#   160(%esp) = modulus limb array p[17]
+	# The low 17 limbs of the sum are stored to z as they are produced;
+	# the high 17 limbs are kept in spills, trial-subtracted by p, and
+	# the reduced or unreduced high half is selected on the final borrow.
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$128, %esp
+	# ---- phase 1: 34-limb addl/adcl chain; limbs 0..16 stored directly,
+	#      limbs 17..33 spilled for the conditional reduction ----
+	movl	156(%esp), %ecx
+	movl	152(%esp), %edx
+	movl	12(%edx), %edi
+	movl	16(%edx), %esi
+	movl	8(%ecx), %ebx
+	movl	(%ecx), %ebp
+	addl	(%edx), %ebp
+	movl	148(%esp), %eax
+	movl	%ebp, (%eax)
+	movl	4(%ecx), %ebp
+	adcl	4(%edx), %ebp
+	adcl	8(%edx), %ebx
+	adcl	12(%ecx), %edi
+	adcl	16(%ecx), %esi
+	movl	%ebp, 4(%eax)
+	movl	76(%ecx), %ebp
+	movl	%ebx, 8(%eax)
+	movl	20(%ecx), %ebx
+	movl	%edi, 12(%eax)
+	movl	20(%edx), %edi
+	adcl	%ebx, %edi
+	movl	24(%ecx), %ebx
+	movl	%esi, 16(%eax)
+	movl	24(%edx), %esi
+	adcl	%ebx, %esi
+	movl	28(%ecx), %ebx
+	movl	%edi, 20(%eax)
+	movl	28(%edx), %edi
+	adcl	%ebx, %edi
+	movl	32(%ecx), %ebx
+	movl	%esi, 24(%eax)
+	movl	32(%edx), %esi
+	adcl	%ebx, %esi
+	movl	36(%ecx), %ebx
+	movl	%edi, 28(%eax)
+	movl	36(%edx), %edi
+	adcl	%ebx, %edi
+	movl	40(%ecx), %ebx
+	movl	%esi, 32(%eax)
+	movl	40(%edx), %esi
+	adcl	%ebx, %esi
+	movl	44(%ecx), %ebx
+	movl	%edi, 36(%eax)
+	movl	44(%edx), %edi
+	adcl	%ebx, %edi
+	movl	48(%ecx), %ebx
+	movl	%esi, 40(%eax)
+	movl	48(%edx), %esi
+	adcl	%ebx, %esi
+	movl	52(%ecx), %ebx
+	movl	%edi, 44(%eax)
+	movl	52(%edx), %edi
+	adcl	%ebx, %edi
+	movl	56(%ecx), %ebx
+	movl	%esi, 48(%eax)
+	movl	56(%edx), %esi
+	adcl	%ebx, %esi
+	movl	60(%ecx), %ebx
+	movl	%edi, 52(%eax)
+	movl	60(%edx), %edi
+	adcl	%ebx, %edi
+	movl	64(%ecx), %ebx
+	movl	%esi, 56(%eax)
+	movl	64(%edx), %esi
+	adcl	%ebx, %esi
+	movl	68(%ecx), %ebx
+	movl	%edi, 60(%eax)
+	movl	68(%edx), %edi
+	adcl	%ebx, %edi
+	movl	%edi, 92(%esp)          # 4-byte Spill
+	movl	72(%ecx), %edi
+	movl	%esi, 64(%eax)
+	movl	72(%edx), %eax
+	adcl	%edi, %eax
+	movl	%eax, 96(%esp)          # 4-byte Spill
+	movl	76(%edx), %eax
+	adcl	%ebp, %eax
+	movl	%eax, 100(%esp)         # 4-byte Spill
+	movl	80(%ecx), %esi
+	movl	80(%edx), %eax
+	adcl	%esi, %eax
+	movl	%eax, 104(%esp)         # 4-byte Spill
+	movl	84(%ecx), %esi
+	movl	84(%edx), %eax
+	adcl	%esi, %eax
+	movl	%eax, 108(%esp)         # 4-byte Spill
+	movl	88(%ecx), %esi
+	movl	88(%edx), %eax
+	adcl	%esi, %eax
+	movl	%eax, 112(%esp)         # 4-byte Spill
+	movl	92(%ecx), %esi
+	movl	92(%edx), %eax
+	adcl	%esi, %eax
+	movl	%eax, 116(%esp)         # 4-byte Spill
+	movl	96(%ecx), %esi
+	movl	96(%edx), %eax
+	adcl	%esi, %eax
+	movl	%eax, 120(%esp)         # 4-byte Spill
+	movl	100(%ecx), %esi
+	movl	100(%edx), %eax
+	adcl	%esi, %eax
+	movl	%eax, 124(%esp)         # 4-byte Spill
+	movl	104(%ecx), %esi
+	movl	104(%edx), %eax
+	adcl	%esi, %eax
+	movl	%eax, 76(%esp)          # 4-byte Spill
+	movl	108(%ecx), %esi
+	movl	108(%edx), %eax
+	adcl	%esi, %eax
+	movl	%eax, 80(%esp)          # 4-byte Spill
+	movl	112(%ecx), %esi
+	movl	112(%edx), %eax
+	adcl	%esi, %eax
+	movl	%eax, 84(%esp)          # 4-byte Spill
+	movl	116(%ecx), %esi
+	movl	116(%edx), %eax
+	adcl	%esi, %eax
+	movl	%eax, 88(%esp)          # 4-byte Spill
+	movl	120(%ecx), %edi
+	movl	120(%edx), %esi
+	adcl	%edi, %esi
+	movl	%esi, 64(%esp)          # 4-byte Spill
+	movl	124(%ecx), %ebx
+	movl	124(%edx), %edi
+	adcl	%ebx, %edi
+	movl	%edi, 68(%esp)          # 4-byte Spill
+	movl	128(%ecx), %ebx
+	movl	128(%edx), %ebp
+	adcl	%ebx, %ebp
+	movl	%ebp, 72(%esp)          # 4-byte Spill
+	movl	132(%ecx), %ecx
+	movl	132(%edx), %edx
+	adcl	%ecx, %edx
+	sbbl	%ecx, %ecx              # capture the final carry
+	andl	$1, %ecx
+	# ---- phase 2: trial subtraction of p from the high 17 limbs ----
+	movl	160(%esp), %ebx
+	movl	92(%esp), %eax          # 4-byte Reload
+	subl	(%ebx), %eax
+	movl	%eax, 48(%esp)          # 4-byte Spill
+	movl	96(%esp), %eax          # 4-byte Reload
+	sbbl	4(%ebx), %eax
+	movl	%eax, 44(%esp)          # 4-byte Spill
+	movl	100(%esp), %eax         # 4-byte Reload
+	sbbl	8(%ebx), %eax
+	movl	%eax, 40(%esp)          # 4-byte Spill
+	movl	104(%esp), %eax         # 4-byte Reload
+	sbbl	12(%ebx), %eax
+	movl	%eax, 36(%esp)          # 4-byte Spill
+	movl	108(%esp), %eax         # 4-byte Reload
+	sbbl	16(%ebx), %eax
+	movl	%eax, 32(%esp)          # 4-byte Spill
+	movl	112(%esp), %eax         # 4-byte Reload
+	sbbl	20(%ebx), %eax
+	movl	%eax, 28(%esp)          # 4-byte Spill
+	movl	116(%esp), %eax         # 4-byte Reload
+	sbbl	24(%ebx), %eax
+	movl	%eax, 24(%esp)          # 4-byte Spill
+	movl	120(%esp), %eax         # 4-byte Reload
+	sbbl	28(%ebx), %eax
+	movl	%eax, 20(%esp)          # 4-byte Spill
+	movl	124(%esp), %eax         # 4-byte Reload
+	sbbl	32(%ebx), %eax
+	movl	%eax, 16(%esp)          # 4-byte Spill
+	movl	76(%esp), %eax          # 4-byte Reload
+	sbbl	36(%ebx), %eax
+	movl	%eax, 12(%esp)          # 4-byte Spill
+	movl	80(%esp), %eax          # 4-byte Reload
+	sbbl	40(%ebx), %eax
+	movl	%eax, 8(%esp)           # 4-byte Spill
+	movl	84(%esp), %eax          # 4-byte Reload
+	sbbl	44(%ebx), %eax
+	movl	%eax, 4(%esp)           # 4-byte Spill
+	movl	88(%esp), %eax          # 4-byte Reload
+	sbbl	48(%ebx), %eax
+	movl	%eax, (%esp)            # 4-byte Spill
+	sbbl	52(%ebx), %esi
+	movl	%esi, 52(%esp)          # 4-byte Spill
+	sbbl	56(%ebx), %edi
+	movl	%edi, 56(%esp)          # 4-byte Spill
+	sbbl	60(%ebx), %ebp
+	movl	%ebp, 60(%esp)          # 4-byte Spill
+	movl	%edx, %ebp
+	sbbl	64(%ebx), %ebp
+	sbbl	$0, %ecx                # fold carry-in against the borrow-out
+	andl	$1, %ecx
+	# ---- phase 3: select reduced vs. unreduced high half on %cl,
+	#      then store limbs 17..33 to z+68 ----
+	jne	.LBB268_2
+# BB#1:
+	movl	%ebp, %edx
+.LBB268_2:
+	testb	%cl, %cl
+	movl	92(%esp), %eax          # 4-byte Reload
+	movl	88(%esp), %esi          # 4-byte Reload
+	movl	84(%esp), %edi          # 4-byte Reload
+	movl	80(%esp), %ebx          # 4-byte Reload
+	movl	76(%esp), %ebp          # 4-byte Reload
+	jne	.LBB268_4
+# BB#3:
+	movl	(%esp), %esi            # 4-byte Reload
+	movl	4(%esp), %edi           # 4-byte Reload
+	movl	8(%esp), %ebx           # 4-byte Reload
+	movl	12(%esp), %ebp          # 4-byte Reload
+	movl	16(%esp), %eax          # 4-byte Reload
+	movl	%eax, 124(%esp)         # 4-byte Spill
+	movl	20(%esp), %eax          # 4-byte Reload
+	movl	%eax, 120(%esp)         # 4-byte Spill
+	movl	24(%esp), %eax          # 4-byte Reload
+	movl	%eax, 116(%esp)         # 4-byte Spill
+	movl	28(%esp), %eax          # 4-byte Reload
+	movl	%eax, 112(%esp)         # 4-byte Spill
+	movl	32(%esp), %eax          # 4-byte Reload
+	movl	%eax, 108(%esp)         # 4-byte Spill
+	movl	36(%esp), %eax          # 4-byte Reload
+	movl	%eax, 104(%esp)         # 4-byte Spill
+	movl	40(%esp), %eax          # 4-byte Reload
+	movl	%eax, 100(%esp)         # 4-byte Spill
+	movl	44(%esp), %eax          # 4-byte Reload
+	movl	%eax, 96(%esp)          # 4-byte Spill
+	movl	48(%esp), %eax          # 4-byte Reload
+.LBB268_4:
+	movl	148(%esp), %ecx
+	movl	%eax, 68(%ecx)
+	movl	%ecx, %eax
+	movl	96(%esp), %ecx          # 4-byte Reload
+	movl	%ecx, 72(%eax)
+	movl	100(%esp), %ecx         # 4-byte Reload
+	movl	%ecx, 76(%eax)
+	movl	104(%esp), %ecx         # 4-byte Reload
+	movl	%ecx, 80(%eax)
+	movl	108(%esp), %ecx         # 4-byte Reload
+	movl	%ecx, 84(%eax)
+	movl	112(%esp), %ecx         # 4-byte Reload
+	movl	%ecx, 88(%eax)
+	movl	116(%esp), %ecx         # 4-byte Reload
+	movl	%ecx, 92(%eax)
+	movl	120(%esp), %ecx         # 4-byte Reload
+	movl	%ecx, 96(%eax)
+	movl	124(%esp), %ecx         # 4-byte Reload
+	movl	%ecx, 100(%eax)
+	movl	%ebp, 104(%eax)
+	movl	%ebx, 108(%eax)
+	movl	%edi, 112(%eax)
+	movl	%esi, 116(%eax)
+	movl	72(%esp), %ecx          # 4-byte Reload
+	movl	64(%esp), %esi          # 4-byte Reload
+	jne	.LBB268_6
+# BB#5:
+	movl	52(%esp), %esi          # 4-byte Reload
+.LBB268_6:
+	movl	%esi, 120(%eax)
+	movl	68(%esp), %esi          # 4-byte Reload
+	jne	.LBB268_8
+# BB#7:
+	movl	56(%esp), %esi          # 4-byte Reload
+.LBB268_8:
+	movl	%esi, 124(%eax)
+	jne	.LBB268_10
+# BB#9:
+	movl	60(%esp), %ecx          # 4-byte Reload
+.LBB268_10:
+	movl	%ecx, 128(%eax)
+	movl	%edx, 132(%eax)
+	addl	$128, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end268:
+	.size	mcl_fpDbl_add17L, .Lfunc_end268-mcl_fpDbl_add17L
+
+	.globl	mcl_fpDbl_sub17L
+	.align	16, 0x90
+	.type	mcl_fpDbl_sub17L,@function
+mcl_fpDbl_sub17L:                       # @mcl_fpDbl_sub17L
+# BB#0:
+	# Double-width subtraction: subtracts two 34-limb (2x544-bit) numbers.
+	# i386 cdecl; stack args after the 4 pushes + subl $116 prologue:
+	#   136(%esp) = output limb array z[34]
+	#   140(%esp) = minuend, 144(%esp) = subtrahend
+	#   148(%esp) = modulus limb array p[17]
+	# The low 17 limbs of the difference are stored to z as produced;
+	# if the full chain borrowed out, p is added into the high 17 limbs
+	# (each addend below is p[i] or 0 selected on the borrow flag).
+	pushl	%ebp
+	pushl	%ebx
+	pushl	%edi
+	pushl	%esi
+	subl	$116, %esp
+	# ---- phase 1: 34-limb subl/sbbl chain; limbs 0..16 stored directly,
+	#      limbs 17..33 spilled at 44..112(%esp) ----
+	movl	140(%esp), %edx
+	movl	(%edx), %eax
+	movl	4(%edx), %edi
+	movl	144(%esp), %esi
+	subl	(%esi), %eax
+	sbbl	4(%esi), %edi
+	movl	8(%edx), %ebx
+	sbbl	8(%esi), %ebx
+	movl	136(%esp), %ecx
+	movl	%eax, (%ecx)
+	movl	12(%edx), %eax
+	sbbl	12(%esi), %eax
+	movl	%edi, 4(%ecx)
+	movl	16(%edx), %edi
+	sbbl	16(%esi), %edi
+	movl	%ebx, 8(%ecx)
+	movl	20(%esi), %ebx
+	movl	%eax, 12(%ecx)
+	movl	20(%edx), %eax
+	sbbl	%ebx, %eax
+	movl	24(%esi), %ebx
+	movl	%edi, 16(%ecx)
+	movl	24(%edx), %edi
+	sbbl	%ebx, %edi
+	movl	28(%esi), %ebx
+	movl	%eax, 20(%ecx)
+	movl	28(%edx), %eax
+	sbbl	%ebx, %eax
+	movl	32(%esi), %ebx
+	movl	%edi, 24(%ecx)
+	movl	32(%edx), %edi
+	sbbl	%ebx, %edi
+	movl	36(%esi), %ebx
+	movl	%eax, 28(%ecx)
+	movl	36(%edx), %eax
+	sbbl	%ebx, %eax
+	movl	40(%esi), %ebx
+	movl	%edi, 32(%ecx)
+	movl	40(%edx), %edi
+	sbbl	%ebx, %edi
+	movl	44(%esi), %ebx
+	movl	%eax, 36(%ecx)
+	movl	44(%edx), %eax
+	sbbl	%ebx, %eax
+	movl	48(%esi), %ebx
+	movl	%edi, 40(%ecx)
+	movl	48(%edx), %edi
+	sbbl	%ebx, %edi
+	movl	52(%esi), %ebx
+	movl	%eax, 44(%ecx)
+	movl	52(%edx), %eax
+	sbbl	%ebx, %eax
+	movl	56(%esi), %ebx
+	movl	%edi, 48(%ecx)
+	movl	56(%edx), %edi
+	sbbl	%ebx, %edi
+	movl	60(%esi), %ebx
+	movl	%eax, 52(%ecx)
+	movl	60(%edx), %eax
+	sbbl	%ebx, %eax
+	movl	64(%esi), %ebx
+	movl	%edi, 56(%ecx)
+	movl	64(%edx), %edi
+	sbbl	%ebx, %edi
+	movl	68(%esi), %ebx
+	movl	%eax, 60(%ecx)
+	movl	68(%edx), %eax
+	sbbl	%ebx, %eax
+	movl	%eax, 52(%esp)          # 4-byte Spill
+	movl	72(%esi), %eax
+	movl	%edi, 64(%ecx)
+	movl	72(%edx), %edi
+	sbbl	%eax, %edi
+	movl	%edi, 44(%esp)          # 4-byte Spill
+	movl	76(%esi), %eax
+	movl	76(%edx), %edi
+	sbbl	%eax, %edi
+	movl	%edi, 48(%esp)          # 4-byte Spill
+	movl	80(%esi), %eax
+	movl	80(%edx), %edi
+	sbbl	%eax, %edi
+	movl	%edi, 56(%esp)          # 4-byte Spill
+	movl	84(%esi), %eax
+	movl	84(%edx), %edi
+	sbbl	%eax, %edi
+	movl	%edi, 60(%esp)          # 4-byte Spill
+	movl	88(%esi), %eax
+	movl	88(%edx), %edi
+	sbbl	%eax, %edi
+	movl	%edi, 64(%esp)          # 4-byte Spill
+	movl	92(%esi), %eax
+	movl	92(%edx), %edi
+	sbbl	%eax, %edi
+	movl	%edi, 68(%esp)          # 4-byte Spill
+	movl	96(%esi), %eax
+	movl	96(%edx), %edi
+	sbbl	%eax, %edi
+	movl	%edi, 72(%esp)          # 4-byte Spill
+	movl	100(%esi), %eax
+	movl	100(%edx), %edi
+	sbbl	%eax, %edi
+	movl	%edi, 80(%esp)          # 4-byte Spill
+	movl	104(%esi), %eax
+	movl	104(%edx), %edi
+	sbbl	%eax, %edi
+	movl	%edi, 84(%esp)          # 4-byte Spill
+	movl	108(%esi), %eax
+	movl	108(%edx), %edi
+	sbbl	%eax, %edi
+	movl	%edi, 88(%esp)          # 4-byte Spill
+	movl	112(%esi), %eax
+	movl	112(%edx), %edi
+	sbbl	%eax, %edi
+	movl	%edi, 92(%esp)          # 4-byte Spill
+	movl	116(%esi), %eax
+	movl	116(%edx), %edi
+	sbbl	%eax, %edi
+	movl	%edi, 96(%esp)          # 4-byte Spill
+	movl	120(%esi), %eax
+	movl	120(%edx), %edi
+	sbbl	%eax, %edi
+	movl	%edi, 100(%esp)         # 4-byte Spill
+	movl	124(%esi), %eax
+	movl	124(%edx), %edi
+	sbbl	%eax, %edi
+	movl	%edi, 104(%esp)         # 4-byte Spill
+	movl	128(%esi), %eax
+	movl	128(%edx), %edi
+	sbbl	%eax, %edi
+	movl	%edi, 108(%esp)         # 4-byte Spill
+	movl	132(%esi), %eax
+	movl	132(%edx), %edx
+	sbbl	%eax, %edx
+	movl	%edx, 112(%esp)         # 4-byte Spill
+	movl	$0, %eax
+	sbbl	$0, %eax                # %eax = borrow-out of the whole chain
+	andl	$1, %eax
+	# ---- phase 2: for each modulus limb, select p[i] (borrow) or 0
+	#      (no borrow) into registers/spills; %ebp = modulus pointer ----
+	movl	148(%esp), %ebp
+	jne	.LBB269_1
+# BB#2:
+	movl	$0, 76(%esp)            # 4-byte Folded Spill
+	jmp	.LBB269_3
+.LBB269_1:
+	movl	64(%ebp), %edx
+	movl	%edx, 76(%esp)          # 4-byte Spill
+.LBB269_3:
+	testb	%al, %al
+	jne	.LBB269_4
+# BB#5:
+	movl	$0, 28(%esp)            # 4-byte Folded Spill
+	movl	$0, %esi
+	jmp	.LBB269_6
+.LBB269_4:
+	movl	(%ebp), %esi
+	movl	4(%ebp), %eax
+	movl	%eax, 28(%esp)          # 4-byte Spill
+.LBB269_6:
+	jne	.LBB269_7
+# BB#8:
+	movl	$0, 40(%esp)            # 4-byte Folded Spill
+	jmp	.LBB269_9
+.LBB269_7:
+	movl	60(%ebp), %eax
+	movl	%eax, 40(%esp)          # 4-byte Spill
+.LBB269_9:
+	jne	.LBB269_10
+# BB#11:
+	movl	$0, 36(%esp)            # 4-byte Folded Spill
+	jmp	.LBB269_12
+.LBB269_10:
+	movl	56(%ebp), %eax
+	movl	%eax, 36(%esp)          # 4-byte Spill
+.LBB269_12:
+	jne	.LBB269_13
+# BB#14:
+	movl	$0, 32(%esp)            # 4-byte Folded Spill
+	jmp	.LBB269_15
+.LBB269_13:
+	movl	52(%ebp), %eax
+	movl	%eax, 32(%esp)          # 4-byte Spill
+.LBB269_15:
+	jne	.LBB269_16
+# BB#17:
+	movl	$0, 24(%esp)            # 4-byte Folded Spill
+	jmp	.LBB269_18
+.LBB269_16:
+	movl	48(%ebp), %eax
+	movl	%eax, 24(%esp)          # 4-byte Spill
+.LBB269_18:
+	jne	.LBB269_19
+# BB#20:
+	movl	$0, 20(%esp)            # 4-byte Folded Spill
+	jmp	.LBB269_21
+.LBB269_19:
+	movl	44(%ebp), %eax
+	movl	%eax, 20(%esp)          # 4-byte Spill
+.LBB269_21:
+	jne	.LBB269_22
+# BB#23:
+	movl	$0, 16(%esp)            # 4-byte Folded Spill
+	jmp	.LBB269_24
+.LBB269_22:
+	movl	40(%ebp), %eax
+	movl	%eax, 16(%esp)          # 4-byte Spill
+.LBB269_24:
+	jne	.LBB269_25
+# BB#26:
+	movl	$0, 12(%esp)            # 4-byte Folded Spill
+	jmp	.LBB269_27
+.LBB269_25:
+	movl	36(%ebp), %eax
+	movl	%eax, 12(%esp)          # 4-byte Spill
+.LBB269_27:
+	jne	.LBB269_28
+# BB#29:
+	movl	$0, 8(%esp)             # 4-byte Folded Spill
+	jmp	.LBB269_30
+.LBB269_28:
+	movl	32(%ebp), %eax
+	movl	%eax, 8(%esp)           # 4-byte Spill
+.LBB269_30:
+	jne	.LBB269_31
+# BB#32:
+	movl	$0, 4(%esp)             # 4-byte Folded Spill
+	jmp	.LBB269_33
+.LBB269_31:
+	movl	28(%ebp), %eax
+	movl	%eax, 4(%esp)           # 4-byte Spill
+.LBB269_33:
+	jne	.LBB269_34
+# BB#35:
+	movl	$0, (%esp)              # 4-byte Folded Spill
+	jmp	.LBB269_36
+.LBB269_34:
+	movl	24(%ebp), %eax
+	movl	%eax, (%esp)            # 4-byte Spill
+.LBB269_36:
+	jne	.LBB269_37
+# BB#38:
+	movl	$0, %ebx
+	jmp	.LBB269_39
+.LBB269_37:
+	movl	20(%ebp), %ebx
+.LBB269_39:
+	jne	.LBB269_40
+# BB#41:
+	movl	$0, %edi
+	jmp	.LBB269_42
+.LBB269_40:
+	movl	16(%ebp), %edi
+.LBB269_42:
+	jne	.LBB269_43
+# BB#44:
+	movl	%ebp, %eax
+	movl	$0, %ebp
+	jmp	.LBB269_45
+.LBB269_43:
+	movl	%ebp, %eax
+	movl	12(%eax), %ebp
+.LBB269_45:
+	jne	.LBB269_46
+# BB#47:
+	xorl	%eax, %eax
+	jmp	.LBB269_48
+.LBB269_46:
+	movl	8(%eax), %eax
+.LBB269_48:
+	# ---- phase 3: high half of z = difference + selected addends,
+	#      addl/adcl chain, stores to z+68..z+132 interleaved ----
+	addl	52(%esp), %esi          # 4-byte Folded Reload
+	movl	28(%esp), %edx          # 4-byte Reload
+	adcl	44(%esp), %edx          # 4-byte Folded Reload
+	movl	%esi, 68(%ecx)
+	adcl	48(%esp), %eax          # 4-byte Folded Reload
+	movl	%edx, 72(%ecx)
+	adcl	56(%esp), %ebp          # 4-byte Folded Reload
+	movl	%eax, 76(%ecx)
+	adcl	60(%esp), %edi          # 4-byte Folded Reload
+	movl	%ebp, 80(%ecx)
+	adcl	64(%esp), %ebx          # 4-byte Folded Reload
+	movl	%edi, 84(%ecx)
+	movl	(%esp), %edx            # 4-byte Reload
+	adcl	68(%esp), %edx          # 4-byte Folded Reload
+	movl	%ebx, 88(%ecx)
+	movl	4(%esp), %eax           # 4-byte Reload
+	adcl	72(%esp), %eax          # 4-byte Folded Reload
+	movl	%edx, 92(%ecx)
+	movl	8(%esp), %edx           # 4-byte Reload
+	adcl	80(%esp), %edx          # 4-byte Folded Reload
+	movl	%eax, 96(%ecx)
+	movl	12(%esp), %eax          # 4-byte Reload
+	adcl	84(%esp), %eax          # 4-byte Folded Reload
+	movl	%edx, 100(%ecx)
+	movl	16(%esp), %edx          # 4-byte Reload
+	adcl	88(%esp), %edx          # 4-byte Folded Reload
+	movl	%eax, 104(%ecx)
+	movl	20(%esp), %eax          # 4-byte Reload
+	adcl	92(%esp), %eax          # 4-byte Folded Reload
+	movl	%edx, 108(%ecx)
+	movl	24(%esp), %edx          # 4-byte Reload
+	adcl	96(%esp), %edx          # 4-byte Folded Reload
+	movl	%eax, 112(%ecx)
+	movl	32(%esp), %eax          # 4-byte Reload
+	adcl	100(%esp), %eax         # 4-byte Folded Reload
+	movl	%edx, 116(%ecx)
+	movl	36(%esp), %edx          # 4-byte Reload
+	adcl	104(%esp), %edx         # 4-byte Folded Reload
+	movl	%eax, 120(%ecx)
+	movl	40(%esp), %eax          # 4-byte Reload
+	adcl	108(%esp), %eax         # 4-byte Folded Reload
+	movl	%edx, 124(%ecx)
+	movl	%eax, 128(%ecx)
+	movl	76(%esp), %eax          # 4-byte Reload
+	adcl	112(%esp), %eax         # 4-byte Folded Reload
+	movl	%eax, 132(%ecx)
+	addl	$116, %esp
+	popl	%esi
+	popl	%edi
+	popl	%ebx
+	popl	%ebp
+	retl
+.Lfunc_end269:
+	.size	mcl_fpDbl_sub17L, .Lfunc_end269-mcl_fpDbl_sub17L
+
+
+ .section ".note.GNU-stack","",@progbits